MI HV CC LOC Filepath Method Code
29095.7294401hadoop
/**
 * Entry point for the job CLI: parses the command in {@code argv[0]} and its
 * arguments, then executes the requested action against the cluster.
 *
 * <p>Processing happens in two phases: first the argument list is validated and
 * flags/parameters are recorded; then the selected action is executed against a
 * freshly created {@code Cluster}, which is always closed on the way out.
 *
 * @param argv command name (e.g. {@code -submit}, {@code -status}, {@code -kill},
 *             {@code -history}, {@code -logs}) followed by its arguments
 * @return 0 on success, -1 on a usage error or a failed operation
 * @throws Exception if the underlying cluster operation fails; an
 *                   {@code AccessControlException} wrapped in a
 *                   {@code RemoteException} is reported and swallowed instead
 */
public int run(String[] argv) throws Exception {
    int exitCode = -1;
    if (argv.length < 1) {
        displayUsage("");
        return exitCode;
    }
    // ---- Phase 1: parse the command and validate its argument count. ----
    String cmd = argv[0];
    String submitJobFile = null;
    String jobid = null;
    String taskid = null;
    String historyFileOrJobId = null;
    String historyOutFile = null;
    String historyOutFormat = HistoryViewer.HUMAN_FORMAT;
    String counterGroupName = null;
    String counterName = null;
    JobPriority jp = null;
    String taskType = null;
    String taskState = null;
    int fromEvent = 0;
    int nEvents = 0;
    int jpvalue = 0;
    String configOutFile = null;
    // Exactly one of these action flags is set by the parsing phase below.
    boolean getStatus = false;
    boolean getCounter = false;
    boolean killJob = false;
    boolean listEvents = false;
    boolean viewHistory = false;
    boolean viewAllHistory = false;
    boolean listJobs = false;
    boolean listAllJobs = false;
    boolean listActiveTrackers = false;
    boolean listBlacklistedTrackers = false;
    boolean displayTasks = false;
    boolean killTask = false;
    boolean failTask = false;
    boolean setJobPriority = false;
    boolean logs = false;
    boolean downloadConfig = false;
    if ("-submit".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        submitJobFile = argv[1];
    } else if ("-status".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        getStatus = true;
    } else if ("-counter".equals(cmd)) {
        if (argv.length != 4) {
            displayUsage(cmd);
            return exitCode;
        }
        getCounter = true;
        jobid = argv[1];
        counterGroupName = argv[2];
        counterName = argv[3];
    } else if ("-kill".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        killJob = true;
    } else if ("-set-priority".equals(cmd)) {
        if (argv.length != 3) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        // The priority may be given either as an enum name or as an integer.
        try {
            jp = JobPriority.valueOf(argv[2]);
        } catch (IllegalArgumentException iae) {
            try {
                jpvalue = Integer.parseInt(argv[2]);
            } catch (NumberFormatException ne) {
                LOG.info("Error number format: ", ne);
                displayUsage(cmd);
                return exitCode;
            }
        }
        setJobPriority = true;
    } else if ("-events".equals(cmd)) {
        if (argv.length != 4) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        fromEvent = Integer.parseInt(argv[2]);
        nEvents = Integer.parseInt(argv[3]);
        listEvents = true;
    } else if ("-history".equals(cmd)) {
        viewHistory = true;
        if (argv.length < 2 || argv.length > 7) {
            displayUsage(cmd);
            return exitCode;
        }
        // Optional leading "all", then <file|jobid>, then optional
        // "-outfile <file>" and "-format <format>" pairs, in that order.
        int index = 1;
        if ("all".equals(argv[index])) {
            index++;
            viewAllHistory = true;
            if (argv.length == 2) {
                displayUsage(cmd);
                return exitCode;
            }
        }
        historyFileOrJobId = argv[index++];
        if (argv.length > index + 1 && "-outfile".equals(argv[index])) {
            index++;
            historyOutFile = argv[index++];
        }
        if (argv.length > index + 1 && "-format".equals(argv[index])) {
            index++;
            historyOutFormat = argv[index++];
        }
        if (argv.length > index) {
            displayUsage(cmd);
            return exitCode;
        }
    } else if ("-list".equals(cmd)) {
        if (argv.length != 1 && !(argv.length == 2 && "all".equals(argv[1]))) {
            displayUsage(cmd);
            return exitCode;
        }
        if (argv.length == 2 && "all".equals(argv[1])) {
            listAllJobs = true;
        } else {
            listJobs = true;
        }
    } else if ("-kill-task".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        killTask = true;
        taskid = argv[1];
    } else if ("-fail-task".equals(cmd)) {
        if (argv.length != 2) {
            displayUsage(cmd);
            return exitCode;
        }
        failTask = true;
        taskid = argv[1];
    } else if ("-list-active-trackers".equals(cmd)) {
        if (argv.length != 1) {
            displayUsage(cmd);
            return exitCode;
        }
        listActiveTrackers = true;
    } else if ("-list-blacklisted-trackers".equals(cmd)) {
        if (argv.length != 1) {
            displayUsage(cmd);
            return exitCode;
        }
        listBlacklistedTrackers = true;
    } else if ("-list-attempt-ids".equals(cmd)) {
        if (argv.length != 4) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        taskType = argv[2];
        taskState = argv[3];
        displayTasks = true;
        if (!taskTypes.contains(org.apache.hadoop.util.StringUtils.toUpperCase(taskType))) {
            System.out.println("Error: Invalid task-type: " + taskType);
            displayUsage(cmd);
            return exitCode;
        }
        if (!taskStates.contains(org.apache.hadoop.util.StringUtils.toLowerCase(taskState))) {
            System.out.println("Error: Invalid task-state: " + taskState);
            displayUsage(cmd);
            return exitCode;
        }
    } else if ("-logs".equals(cmd)) {
        if (argv.length == 2 || argv.length == 3) {
            logs = true;
            jobid = argv[1];
            if (argv.length == 3) {
                taskid = argv[2];
            } else {
                taskid = null;
            }
        } else {
            displayUsage(cmd);
            return exitCode;
        }
    } else if ("-config".equals(cmd)) {
        downloadConfig = true;
        if (argv.length != 3) {
            displayUsage(cmd);
            return exitCode;
        }
        jobid = argv[1];
        configOutFile = argv[2];
    } else {
        displayUsage(cmd);
        return exitCode;
    }
    // ---- Phase 2: execute the selected action against the cluster. ----
    cluster = createCluster();
    try {
        if (submitJobFile != null) {
            Job job = Job.getInstance(new JobConf(submitJobFile));
            job.submit();
            System.out.println("Created job " + job.getJobID());
            exitCode = 0;
        } else if (getStatus) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                Counters counters = job.getCounters();
                System.out.println();
                System.out.println(job);
                if (counters != null) {
                    System.out.println(counters);
                } else {
                    System.out.println("Counters not available. Job is retired.");
                }
                exitCode = 0;
            }
        } else if (getCounter) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                Counters counters = job.getCounters();
                if (counters == null) {
                    System.out.println("Counters not available for retired job " + jobid);
                    exitCode = -1;
                } else {
                    System.out.println(getCounter(counters, counterGroupName, counterName));
                    exitCode = 0;
                }
            }
        } else if (killJob) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                // Refuse to kill jobs that have already reached a terminal state.
                JobStatus jobStatus = job.getStatus();
                if (jobStatus.getState() == JobStatus.State.FAILED) {
                    System.out.println("Could not mark the job " + jobid + " as killed, as it has already failed.");
                    exitCode = -1;
                } else if (jobStatus.getState() == JobStatus.State.KILLED) {
                    System.out.println("The job " + jobid + " has already been killed.");
                    exitCode = -1;
                } else if (jobStatus.getState() == JobStatus.State.SUCCEEDED) {
                    System.out.println("Could not kill the job " + jobid + ", as it has already succeeded.");
                    exitCode = -1;
                } else {
                    job.killJob();
                    System.out.println("Killed job " + jobid);
                    exitCode = 0;
                }
            }
        } else if (setJobPriority) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                if (jp != null) {
                    job.setPriority(jp);
                } else {
                    job.setPriorityAsInteger(jpvalue);
                }
                System.out.println("Changed job priority.");
                exitCode = 0;
            }
        } else if (viewHistory) {
            // The argument is either a history file (.jhist) or a job id whose
            // history URL must be resolved from the cluster.
            if (historyFileOrJobId.endsWith(".jhist")) {
                viewHistory(historyFileOrJobId, viewAllHistory, historyOutFile, historyOutFormat);
                exitCode = 0;
            } else {
                Job job = getJob(JobID.forName(historyFileOrJobId));
                if (job == null) {
                    // Fix: report the id actually looked up; jobid is always
                    // null on this code path.
                    System.out.println("Could not find job " + historyFileOrJobId);
                } else {
                    String historyUrl = job.getHistoryUrl();
                    if (historyUrl == null || historyUrl.isEmpty()) {
                        System.out.println("History file for job " + historyFileOrJobId + " is currently unavailable.");
                    } else {
                        viewHistory(historyUrl, viewAllHistory, historyOutFile, historyOutFormat);
                        exitCode = 0;
                    }
                }
            }
        } else if (listEvents) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                listEvents(job, fromEvent, nEvents);
                exitCode = 0;
            }
        } else if (listJobs) {
            listJobs(cluster);
            exitCode = 0;
        } else if (listAllJobs) {
            listAllJobs(cluster);
            exitCode = 0;
        } else if (listActiveTrackers) {
            listActiveTrackers(cluster);
            exitCode = 0;
        } else if (listBlacklistedTrackers) {
            listBlacklistedTrackers(cluster);
            exitCode = 0;
        } else if (displayTasks) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                // Fix: reuse the job already fetched above instead of issuing
                // a redundant second getJob() lookup.
                displayTasks(job, taskType, taskState);
                exitCode = 0;
            }
        } else if (killTask) {
            TaskAttemptID taskID = TaskAttemptID.forName(taskid);
            Job job = getJob(taskID.getJobID());
            if (job == null) {
                // Fix: jobid is always null for -kill-task; derive the job id
                // from the parsed task attempt id instead.
                System.out.println("Could not find job " + taskID.getJobID());
            } else if (job.killTask(taskID, false)) {
                System.out.println("Killed task " + taskid);
                exitCode = 0;
            } else {
                System.out.println("Could not kill task " + taskid);
                exitCode = -1;
            }
        } else if (failTask) {
            TaskAttemptID taskID = TaskAttemptID.forName(taskid);
            Job job = getJob(taskID.getJobID());
            if (job == null) {
                // Fix: jobid is always null for -fail-task; derive the job id
                // from the parsed task attempt id instead.
                System.out.println("Could not find job " + taskID.getJobID());
            } else if (job.killTask(taskID, true)) {
                System.out.println("Killed task " + taskID + " by failing it");
                exitCode = 0;
            } else {
                System.out.println("Could not fail task " + taskid);
                exitCode = -1;
            }
        } else if (logs) {
            JobID jobID = JobID.forName(jobid);
            if (getJob(jobID) == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                try {
                    TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskid);
                    LogParams logParams = cluster.getLogParams(jobID, taskAttemptID);
                    LogCLIHelpers logDumper = new LogCLIHelpers();
                    logDumper.setConf(getConf());
                    exitCode = logDumper.dumpAContainersLogs(logParams.getApplicationId(), logParams.getContainerId(), logParams.getNodeId(), logParams.getOwner());
                } catch (IOException e) {
                    // RemoteExceptions are handled by the outer catch; other
                    // I/O failures are reported and leave exitCode at -1.
                    if (e instanceof RemoteException) {
                        throw e;
                    }
                    System.out.println(e.getMessage());
                }
            }
        } else if (downloadConfig) {
            Job job = getJob(JobID.forName(jobid));
            if (job == null) {
                System.out.println("Could not find job " + jobid);
            } else {
                String jobFile = job.getJobFile();
                if (jobFile == null || jobFile.isEmpty()) {
                    // Fix: print the job id, not jobFile (which is null or
                    // empty on this path).
                    System.out.println("Config file for job " + jobid + " could not be found.");
                } else {
                    Path configPath = new Path(jobFile);
                    FileSystem fs = FileSystem.get(getConf());
                    fs.copyToLocalFile(configPath, new Path(configOutFile));
                    exitCode = 0;
                }
            }
        }
    } catch (RemoteException re) {
        // Access-control failures are user errors: print and return -1.
        // Everything else is unexpected and propagates to the caller.
        IOException unwrappedException = re.unwrapRemoteException();
        if (unwrappedException instanceof AccessControlException) {
            System.out.println(unwrappedException.getMessage());
        } else {
            throw re;
        }
    } finally {
        cluster.close();
    }
    return exitCode;
}
611362.55123171elasticsearch
 /**
  * Builds the {@code _cat/nodes} table: one row per discovery node in the
  * cluster state, with cells populated from the matching entries in the
  * node-info and node-stats responses.
  *
  * <p>Info/stats may be missing for a node (e.g. it failed to respond); every
  * derived value is therefore null-guarded and rendered as {@code null} so the
  * row stays aligned with the header.
  *
  * @param fullId     whether to print the full node id or a 4-char prefix
  * @param req        the incoming REST request (drives the header layout)
  * @param state      cluster state providing the node list and master id
  * @param nodesInfo  per-node static info, keyed by node id (may lack nodes)
  * @param nodesStats per-node runtime stats, keyed by node id (may lack nodes)
  * @return the populated table
  */
 Table buildTable(boolean fullId, RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
    DiscoveryNodes nodes = state.getState().nodes();
    String masterId = nodes.getMasterNodeId();
    Table table = getTableWithHeader(req);
    for (DiscoveryNode node : nodes) {
        NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
        NodeStats stats = nodesStats.getNodesMap().get(node.getId());
        // Sub-stats are extracted up front; each may be null independently.
        JvmInfo jvmInfo = info == null ? null : info.getInfo(JvmInfo.class);
        JvmStats jvmStats = stats == null ? null : stats.getJvm();
        FsInfo fsInfo = stats == null ? null : stats.getFs();
        OsStats osStats = stats == null ? null : stats.getOs();
        ProcessStats processStats = stats == null ? null : stats.getProcess();
        NodeIndicesStats indicesStats = stats == null ? null : stats.getIndices();
        table.startRow();
        // Identity / address cells.
        table.addCell(fullId ? node.getId() : Strings.substring(node.getId(), 0, 4));
        table.addCell(info == null ? null : info.getInfo(ProcessInfo.class).getId());
        table.addCell(node.getHostAddress());
        table.addCell(node.getAddress().address().getPort());
        final HttpInfo httpInfo = info == null ? null : info.getInfo(HttpInfo.class);
        if (httpInfo != null) {
            TransportAddress transportAddress = httpInfo.getAddress().publishAddress();
            table.addCell(NetworkAddress.format(transportAddress.address()));
        } else {
            table.addCell("-");
        }
        table.addCell(node.getVersion().toString());
        table.addCell(info == null ? null : info.getBuild().type().displayName());
        table.addCell(info == null ? null : info.getBuild().hash());
        table.addCell(jvmInfo == null ? null : jvmInfo.version());
        // Disk usage: derived from total/available; guard against a zero total.
        ByteSizeValue diskTotal = null;
        ByteSizeValue diskUsed = null;
        ByteSizeValue diskAvailable = null;
        RestTable.FormattedDouble diskUsedPercent = null;
        if (fsInfo != null) {
            diskTotal = fsInfo.getTotal().getTotal();
            diskAvailable = fsInfo.getTotal().getAvailable();
            diskUsed = ByteSizeValue.ofBytes(diskTotal.getBytes() - diskAvailable.getBytes());
            double diskUsedRatio = diskTotal.getBytes() == 0 ? 1.0 : (double) diskUsed.getBytes() / diskTotal.getBytes();
            diskUsedPercent = RestTable.FormattedDouble.format2DecimalPlaces(100.0 * diskUsedRatio);
        }
        table.addCell(diskTotal);
        table.addCell(diskUsed);
        table.addCell(diskAvailable);
        table.addCell(diskUsedPercent);
        // JVM heap / OS memory / process descriptors / CPU.
        table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsed());
        table.addCell(jvmStats == null ? null : jvmStats.getMem().getHeapUsedPercent());
        table.addCell(jvmInfo == null ? null : jvmInfo.getMem().getHeapMax());
        table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsed());
        table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getUsedPercent());
        table.addCell(osStats == null ? null : osStats.getMem() == null ? null : osStats.getMem().getTotal());
        table.addCell(processStats == null ? null : processStats.getOpenFileDescriptors());
        table.addCell(processStats == null ? null : calculatePercentage(processStats.getOpenFileDescriptors(), processStats.getMaxFileDescriptors()));
        table.addCell(processStats == null ? null : processStats.getMaxFileDescriptors());
        table.addCell(osStats == null ? null : Short.toString(osStats.getCpu().getPercent()));
        // Load averages: -1 marks "unavailable" on some platforms.
        boolean hasLoadAverage = osStats != null && osStats.getCpu().getLoadAverage() != null;
        table.addCell(hasLoadAverage == false || osStats.getCpu().getLoadAverage()[0] == -1 ? null : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[0]));
        table.addCell(hasLoadAverage == false || osStats.getCpu().getLoadAverage()[1] == -1 ? null : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[1]));
        table.addCell(hasLoadAverage == false || osStats.getCpu().getLoadAverage()[2] == -1 ? null : RestTable.FormattedDouble.format2DecimalPlaces(osStats.getCpu().getLoadAverage()[2]));
        table.addCell(jvmStats == null ? null : jvmStats.getUptime());
        table.addCell(node.getRoleAbbreviationString());
        table.addCell(masterId == null ? "x" : masterId.equals(node.getId()) ? "*" : "-");
        table.addCell(node.getName());
        // Per-index stats; fix: dereference indicesStats consistently instead
        // of going back through stats.getIndices() (same object, redundant call).
        CompletionStats completionStats = indicesStats == null ? null : indicesStats.getCompletion();
        table.addCell(completionStats == null ? null : completionStats.getSize());
        FieldDataStats fdStats = indicesStats == null ? null : indicesStats.getFieldData();
        table.addCell(fdStats == null ? null : fdStats.getMemorySize());
        table.addCell(fdStats == null ? null : fdStats.getEvictions());
        QueryCacheStats fcStats = indicesStats == null ? null : indicesStats.getQueryCache();
        table.addCell(fcStats == null ? null : fcStats.getMemorySize());
        table.addCell(fcStats == null ? null : fcStats.getEvictions());
        table.addCell(fcStats == null ? null : fcStats.getHitCount());
        table.addCell(fcStats == null ? null : fcStats.getMissCount());
        RequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getRequestCache();
        table.addCell(qcStats == null ? null : qcStats.getMemorySize());
        table.addCell(qcStats == null ? null : qcStats.getEvictions());
        table.addCell(qcStats == null ? null : qcStats.getHitCount());
        table.addCell(qcStats == null ? null : qcStats.getMissCount());
        FlushStats flushStats = indicesStats == null ? null : indicesStats.getFlush();
        table.addCell(flushStats == null ? null : flushStats.getTotal());
        table.addCell(flushStats == null ? null : flushStats.getTotalTime());
        GetStats getStats = indicesStats == null ? null : indicesStats.getGet();
        table.addCell(getStats == null ? null : getStats.current());
        table.addCell(getStats == null ? null : getStats.getTime());
        table.addCell(getStats == null ? null : getStats.getCount());
        table.addCell(getStats == null ? null : getStats.getExistsTime());
        table.addCell(getStats == null ? null : getStats.getExistsCount());
        table.addCell(getStats == null ? null : getStats.getMissingTime());
        table.addCell(getStats == null ? null : getStats.getMissingCount());
        IndexingStats indexingStats = indicesStats == null ? null : indicesStats.getIndexing();
        table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCurrent());
        table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteTime());
        table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCount());
        table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCurrent());
        table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexTime());
        table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexCount());
        table.addCell(indexingStats == null ? null : indexingStats.getTotal().getIndexFailedCount());
        MergeStats mergeStats = indicesStats == null ? null : indicesStats.getMerge();
        table.addCell(mergeStats == null ? null : mergeStats.getCurrent());
        table.addCell(mergeStats == null ? null : mergeStats.getCurrentNumDocs());
        table.addCell(mergeStats == null ? null : mergeStats.getCurrentSize());
        table.addCell(mergeStats == null ? null : mergeStats.getTotal());
        table.addCell(mergeStats == null ? null : mergeStats.getTotalNumDocs());
        table.addCell(mergeStats == null ? null : mergeStats.getTotalSize());
        table.addCell(mergeStats == null ? null : mergeStats.getTotalTime());
        RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh();
        table.addCell(refreshStats == null ? null : refreshStats.getTotal());
        table.addCell(refreshStats == null ? null : refreshStats.getTotalTime());
        table.addCell(refreshStats == null ? null : refreshStats.getExternalTotal());
        table.addCell(refreshStats == null ? null : refreshStats.getExternalTotalTime());
        table.addCell(refreshStats == null ? null : refreshStats.getListeners());
        ScriptStats scriptStats = stats == null ? null : stats.getScriptStats();
        table.addCell(scriptStats == null ? null : scriptStats.getCompilations());
        table.addCell(scriptStats == null ? null : scriptStats.getCacheEvictions());
        table.addCell(scriptStats == null ? null : scriptStats.getCompilationLimitTriggered());
        SearchStats searchStats = indicesStats == null ? null : indicesStats.getSearch();
        table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCurrent());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchTime());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getFetchCount());
        table.addCell(searchStats == null ? null : searchStats.getOpenContexts());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount());
        SegmentsStats segmentsStats = indicesStats == null ? null : indicesStats.getSegments();
        table.addCell(segmentsStats == null ? null : segmentsStats.getCount());
        // NOTE(review): segments memory cell is intentionally always ZERO when
        // present — presumably the underlying metric was removed; confirm.
        table.addCell(segmentsStats == null ? null : ByteSizeValue.ZERO);
        table.addCell(segmentsStats == null ? null : segmentsStats.getIndexWriterMemory());
        table.addCell(segmentsStats == null ? null : segmentsStats.getVersionMapMemory());
        table.addCell(segmentsStats == null ? null : segmentsStats.getBitsetMemory());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestCurrent());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestTime());
        table.addCell(searchStats == null ? null : searchStats.getTotal().getSuggestCount());
        BulkStats bulkStats = indicesStats == null ? null : indicesStats.getBulk();
        table.addCell(bulkStats == null ? null : bulkStats.getTotalOperations());
        table.addCell(bulkStats == null ? null : bulkStats.getTotalTime());
        table.addCell(bulkStats == null ? null : bulkStats.getTotalSizeInBytes());
        table.addCell(bulkStats == null ? null : bulkStats.getAvgTime());
        table.addCell(bulkStats == null ? null : bulkStats.getAvgSizeInBytes());
        ShardCountStats shardCountStats = indicesStats == null ? null : indicesStats.getShardCount();
        table.addCell(shardCountStats == null ? null : shardCountStats.getTotalCount());
        NodeMappingStats nodeMappingStats = indicesStats == null ? null : indicesStats.getNodeMappingStats();
        table.addCell(nodeMappingStats == null ? null : nodeMappingStats.getTotalCount());
        table.addCell(nodeMappingStats == null ? null : nodeMappingStats.getTotalEstimatedOverhead());
        table.endRow();
    }
    return table;
}
62542.36108327hadoop
public void incInvokedMethod(Method method) {
    switch(method.getName()) {
        case "getBlockLocations":
            getBlockLocationsOps.incr();
            break;
        case "getServerDefaults":
            getServerDefaultsOps.incr();
            break;
        case "create":
            createOps.incr();
            break;
        case "append":
            appendOps.incr();
            break;
        case "recoverLease":
            recoverLeaseOps.incr();
            break;
        case "setReplication":
            setReplicationOps.incr();
            break;
        case "setStoragePolicy":
            setStoragePolicyOps.incr();
            break;
        case "getStoragePolicies":
            getStoragePoliciesOps.incr();
            break;
        case "setPermission":
            setPermissionOps.incr();
            break;
        case "setOwner":
            setOwnerOps.incr();
            break;
        case "addBlock":
            addBlockOps.incr();
            break;
        case "getAdditionalDatanode":
            getAdditionalDatanodeOps.incr();
            break;
        case "abandonBlock":
            abandonBlockOps.incr();
            break;
        case "complete":
            completeOps.incr();
            break;
        case "updateBlockForPipeline":
            updateBlockForPipelineOps.incr();
            break;
        case "updatePipeline":
            updatePipelineOps.incr();
            break;
        case "getPreferredBlockSize":
            getPreferredBlockSizeOps.incr();
            break;
        case "rename":
            renameOps.incr();
            break;
        case "rename2":
            rename2Ops.incr();
            break;
        case "concat":
            concatOps.incr();
            break;
        case "truncate":
            truncateOps.incr();
            break;
        case "delete":
            deleteOps.incr();
            break;
        case "mkdirs":
            mkdirsOps.incr();
            break;
        case "renewLease":
            renewLeaseOps.incr();
            break;
        case "getListing":
            getListingOps.incr();
            break;
        case "getBatchedListing":
            getBatchedListingOps.incr();
            break;
        case "getFileInfo":
            getFileInfoOps.incr();
            break;
        case "isFileClosed":
            isFileClosedOps.incr();
            break;
        case "getFileLinkInfo":
            getFileLinkInfoOps.incr();
            break;
        case "getLocatedFileInfo":
            getLocatedFileInfoOps.incr();
            break;
        case "getStats":
            getStatsOps.incr();
            break;
        case "getDatanodeReport":
            getDatanodeReportOps.incr();
            break;
        case "getDatanodeStorageReport":
            getDatanodeStorageReportOps.incr();
            break;
        case "setSafeMode":
            setSafeModeOps.incr();
            break;
        case "restoreFailedStorage":
            restoreFailedStorageOps.incr();
            break;
        case "saveNamespace":
            saveNamespaceOps.incr();
            break;
        case "rollEdits":
            rollEditsOps.incr();
            break;
        case "refreshNodes":
            refreshNodesOps.incr();
            break;
        case "finalizeUpgrade":
            finalizeUpgradeOps.incr();
            break;
        case "upgradeStatus":
            upgradeStatusOps.incr();
            break;
        case "rollingUpgrade":
            rollingUpgradeOps.incr();
            break;
        case "metaSave":
            metaSaveOps.incr();
            break;
        case "listCorruptFileBlocks":
            listCorruptFileBlocksOps.incr();
            break;
        case "setBalancerBandwidth":
            setBalancerBandwidthOps.incr();
            break;
        case "getContentSummary":
            getContentSummaryOps.incr();
            break;
        case "fsync":
            fsyncOps.incr();
            break;
        case "setTimes":
            setTimesOps.incr();
            break;
        case "createSymlink":
            createSymlinkOps.incr();
            break;
        case "getLinkTarget":
            getLinkTargetOps.incr();
            break;
        case "allowSnapshot":
            allowSnapshotOps.incr();
            break;
        case "disallowSnapshot":
            disallowSnapshotOps.incr();
            break;
        case "renameSnapshot":
            renameSnapshotOps.incr();
            break;
        case "getSnapshottableDirListing":
            getSnapshottableDirListingOps.incr();
            break;
        case "getSnapshotListing":
            getSnapshotListingOps.incr();
            break;
        case "getSnapshotDiffReport":
            getSnapshotDiffReportOps.incr();
            break;
        case "getSnapshotDiffReportListing":
            getSnapshotDiffReportListingOps.incr();
            break;
        case "addCacheDirective":
            addCacheDirectiveOps.incr();
            break;
        case "modifyCacheDirective":
            modifyCacheDirectiveOps.incr();
            break;
        case "removeCacheDirective":
            removeCacheDirectiveOps.incr();
            break;
        case "listCacheDirectives":
            listCacheDirectivesOps.incr();
            break;
        case "addCachePool":
            addCachePoolOps.incr();
            break;
        case "modifyCachePool":
            modifyCachePoolOps.incr();
            break;
        case "removeCachePool":
            removeCachePoolOps.incr();
            break;
        case "listCachePools":
            listCachePoolsOps.incr();
            break;
        case "modifyAclEntries":
            modifyAclEntriesOps.incr();
            break;
        case "removeAclEntries":
            removeAclEntriesOps.incr();
            break;
        case "removeDefaultAcl":
            removeDefaultAclOps.incr();
            break;
        case "removeAcl":
            removeAclOps.incr();
            break;
        case "setAcl":
            setAclOps.incr();
            break;
        case "getAclStatus":
            getAclStatusOps.incr();
            break;
        case "createEncryptionZone":
            createEncryptionZoneOps.incr();
            break;
        case "getEZForPath":
            getEZForPathOps.incr();
            break;
        case "listEncryptionZones":
            listEncryptionZonesOps.incr();
            break;
        case "reencryptEncryptionZone":
            reencryptEncryptionZoneOps.incr();
            break;
        case "listReencryptionStatus":
            listReencryptionStatusOps.incr();
            break;
        case "setXAttr":
            setXAttrOps.incr();
            break;
        case "getXAttrs":
            getXAttrsOps.incr();
            break;
        case "listXAttrs":
            listXAttrsOps.incr();
            break;
        case "removeXAttr":
            removeXAttrsOps.incr();
            break;
        case "checkAccess":
            checkAccessOps.incr();
            break;
        case "getCurrentEditLogTxid":
            getCurrentEditLogTxidOps.incr();
            break;
        case "getEditsFromTxid":
            getEditsFromTxidOps.incr();
            break;
        case "getDataEncryptionKey":
            getDataEncryptionKeyOps.incr();
            break;
        case "createSnapshot":
            createSnapshotOps.incr();
            break;
        case "deleteSnapshot":
            deleteSnapshotOps.incr();
            break;
        case "setQuota":
            setQuotaOps.incr();
            break;
        case "getQuotaUsage":
            getQuotaUsageOps.incr();
            break;
        case "reportBadBlocks":
            reportBadBlocksOps.incr();
            break;
        case "unsetStoragePolicy":
            unsetStoragePolicyOps.incr();
            break;
        case "getStoragePolicy":
            getStoragePolicyOps.incr();
            break;
        case "getErasureCodingPolicies":
            getErasureCodingPoliciesOps.incr();
            break;
        case "getErasureCodingCodecs":
            getErasureCodingCodecsOps.incr();
            break;
        case "addErasureCodingPolicies":
            addErasureCodingPoliciesOps.incr();
            break;
        case "removeErasureCodingPolicy":
            removeErasureCodingPolicyOps.incr();
            break;
        case "disableErasureCodingPolicy":
            disableErasureCodingPolicyOps.incr();
            break;
        case "enableErasureCodingPolicy":
            enableErasureCodingPolicyOps.incr();
            break;
        case "getErasureCodingPolicy":
            getErasureCodingPolicyOps.incr();
            break;
        case "setErasureCodingPolicy":
            setErasureCodingPolicyOps.incr();
            break;
        case "unsetErasureCodingPolicy":
            unsetErasureCodingPolicyOps.incr();
            break;
        case "getECTopologyResultForPolicies":
            getECTopologyResultForPoliciesOps.incr();
            break;
        case "getECBlockGroupStats":
            getECBlockGroupStatsOps.incr();
            break;
        case "getReplicatedBlockStats":
            getReplicatedBlockStatsOps.incr();
            break;
        case "listOpenFiles":
            listOpenFilesOps.incr();
            break;
        case "msync":
            msyncOps.incr();
            break;
        case "satisfyStoragePolicy":
            satisfyStoragePolicyOps.incr();
            break;
        case "getHAServiceState":
            getHAServiceStateOps.incr();
            break;
        case "getSlowDatanodeReport":
            getSlowDatanodeReportOps.incr();
            break;
        default:
            otherOps.incr();
    }
}
77455.5167382hadoop
/**
 * Merges command-line option values from {@code opts} into the {@code base}
 * configuration, validating each supplied value before writing it back.
 * Options that were not supplied (the extractor returns {@code null}) are
 * left untouched, so values already present in {@code base} act as defaults.
 *
 * @param opts parsed command-line output to read option values from
 * @param base configuration the validated values are merged into (mutated in place)
 * @return the merged configuration ({@code base}, possibly replaced by
 *         {@link #handleOperations})
 * @throws ConfigException if any value fails extraction or validation
 */
private Configuration handleOptions(ParsedOutput opts, Configuration base) throws ConfigException {
    ConfigExtractor extractor = new ConfigExtractor(base);
    // Map amount: optional, must be strictly positive when present.
    {
        Integer mapAmount = null;
        try {
            mapAmount = extractor.getMapAmount(opts.getValue(ConfigOption.MAPS.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging map amount", e);
        }
        if (mapAmount != null) {
            if (mapAmount <= 0) {
                throw new ConfigException("Map amount can not be less than or equal to zero");
            }
            base.set(ConfigOption.MAPS.getCfgOption(), mapAmount.toString());
        }
    }
    // Reducer amount: optional, must be strictly positive when present.
    // NOTE(review): this extracts the REDUCES option via getMapAmount() —
    // looks like a copy/paste; confirm whether ConfigExtractor exposes a
    // reducer-specific getter that should be used here instead.
    {
        Integer reduceAmount = null;
        try {
            reduceAmount = extractor.getMapAmount(opts.getValue(ConfigOption.REDUCES.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging reducer amount", e);
        }
        if (reduceAmount != null) {
            if (reduceAmount <= 0) {
                throw new ConfigException("Reducer amount can not be less than or equal to zero");
            }
            base.set(ConfigOption.REDUCES.getCfgOption(), reduceAmount.toString());
        }
    }
    // Run duration: optional, must be strictly positive when present.
    {
        Integer duration = null;
        try {
            duration = extractor.getDuration(opts.getValue(ConfigOption.DURATION.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging duration", e);
        }
        if (duration != null) {
            if (duration <= 0) {
                throw new ConfigException("Duration can not be less than or equal to zero");
            }
            base.set(ConfigOption.DURATION.getCfgOption(), duration.toString());
        }
    }
    // Operation count: optional, must be strictly positive when present.
    {
        Integer operationAmount = null;
        try {
            operationAmount = extractor.getOpCount(opts.getValue(ConfigOption.OPS.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging operation amount", e);
        }
        if (operationAmount != null) {
            if (operationAmount <= 0) {
                throw new ConfigException("Operation amount can not be less than or equal to zero");
            }
            base.set(ConfigOption.OPS.getCfgOption(), operationAmount.toString());
        }
    }
    // Exit-on-first-error flag: always written (extractor supplies a default).
    {
        try {
            boolean exitOnError = extractor.shouldExitOnFirstError(opts.getValue(ConfigOption.EXIT_ON_ERROR.getOpt()));
            base.setBoolean(ConfigOption.EXIT_ON_ERROR.getCfgOption(), exitOnError);
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging exit on error value", e);
        }
    }
    // Wait-on-truncate flag: always written (extractor supplies a default).
    {
        try {
            boolean waitOnTruncate = extractor.shouldWaitOnTruncate(opts.getValue(ConfigOption.TRUNCATE_WAIT.getOpt()));
            base.setBoolean(ConfigOption.TRUNCATE_WAIT.getCfgOption(), waitOnTruncate);
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging wait on truncate value", e);
        }
    }
    // Total file limit: optional, must be strictly positive when present.
    {
        Integer fileAm = null;
        try {
            fileAm = extractor.getTotalFiles(opts.getValue(ConfigOption.FILES.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging total file limit amount", e);
        }
        if (fileAm != null) {
            if (fileAm <= 0) {
                throw new ConfigException("File amount can not be less than or equal to zero");
            }
            base.set(ConfigOption.FILES.getCfgOption(), fileAm.toString());
        }
    }
    // Queue name: optional free-form string, no validation beyond presence.
    {
        try {
            String qname = extractor.getQueueName(opts.getValue(ConfigOption.QUEUE_NAME.getOpt()));
            if (qname != null) {
                base.set(ConfigOption.QUEUE_NAME.getCfgOption(), qname);
            }
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging queue name", e);
        }
    }
    // Per-directory file limit: optional, must be strictly positive when present.
    {
        Integer directoryLimit = null;
        try {
            directoryLimit = extractor.getDirSize(opts.getValue(ConfigOption.DIR_SIZE.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging directory file limit", e);
        }
        if (directoryLimit != null) {
            if (directoryLimit <= 0) {
                throw new ConfigException("Directory file limit can not be less than or equal to zero");
            }
            base.set(ConfigOption.DIR_SIZE.getCfgOption(), directoryLimit.toString());
        }
    }
    // Base directory: optional; the well-known BASE_DIR constant is appended
    // so all test artifacts land under a dedicated subdirectory.
    {
        Path basedir = null;
        try {
            basedir = extractor.getBaseDirectory(opts.getValue(ConfigOption.BASE_DIR.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging base directory", e);
        }
        if (basedir != null) {
            basedir = new Path(basedir, Constants.BASE_DIR);
            base.set(ConfigOption.BASE_DIR.getCfgOption(), basedir.toString());
        }
    }
    // Result file: optional output path for the run report.
    // (An adjacent verbatim duplicate of this block was removed; it merely
    // re-extracted and re-set the identical value.)
    {
        String fn = null;
        try {
            fn = extractor.getResultFile(opts.getValue(ConfigOption.RESULT_FILE.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging result file", e);
        }
        if (fn != null) {
            base.set(ConfigOption.RESULT_FILE.getCfgOption(), fn);
        }
    }
    // Operation-specific options are merged by a dedicated helper, which may
    // return a replacement configuration object.
    {
        try {
            base = handleOperations(opts, base, extractor);
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging operations", e);
        }
    }
    // Replication range: lower bound must be positive, at least the configured
    // cluster minimum, and not exceed the upper bound.
    {
        Range<Short> replicationAm = null;
        try {
            replicationAm = extractor.getReplication(opts.getValue(ConfigOption.REPLICATION_AM.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging replication amount range", e);
        }
        if (replicationAm != null) {
            int minRepl = base.getInt(Constants.MIN_REPLICATION, 1);
            if (replicationAm.getLower() < minRepl) {
                throw new ConfigException("Replication amount minimum is less than property configured minimum " + minRepl);
            }
            if (replicationAm.getLower() > replicationAm.getUpper()) {
                throw new ConfigException("Replication amount minimum is greater than its maximum");
            }
            if (replicationAm.getLower() <= 0) {
                throw new ConfigException("Replication amount minimum must be greater than zero");
            }
            base.set(ConfigOption.REPLICATION_AM.getCfgOption(), replicationAm.toString());
        }
    }
    // Sleep-time range: strictly positive, lower bound not above upper.
    {
        Range<Long> sleepRange = null;
        try {
            sleepRange = extractor.getSleepRange(opts.getValue(ConfigOption.SLEEP_TIME.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging sleep size range", e);
        }
        if (sleepRange != null) {
            if (sleepRange.getLower() > sleepRange.getUpper()) {
                throw new ConfigException("Sleep range minimum is greater than its maximum");
            }
            if (sleepRange.getLower() <= 0) {
                throw new ConfigException("Sleep range minimum must be greater than zero");
            }
            base.set(ConfigOption.SLEEP_TIME.getCfgOption(), sleepRange.toString());
        }
    }
    // Write packet size: read directly from the options (no extractor support);
    // falls back to the option's declared default, then parses human-readable
    // binary-prefix sizes (e.g. "64K") into bytes.
    {
        String pSize = opts.getValue(ConfigOption.PACKET_SIZE.getOpt());
        if (pSize == null) {
            pSize = ConfigOption.PACKET_SIZE.getDefault();
        }
        if (pSize != null) {
            try {
                Long packetSize = StringUtils.TraditionalBinaryPrefix.string2long(pSize);
                base.set(ConfigOption.PACKET_SIZE.getCfgOption(), packetSize.toString());
            } catch (Exception e) {
                throw new ConfigException("Error extracting & merging write packet size", e);
            }
        }
    }
    // Block-size range: strictly positive, ordered, and both bounds must be
    // multiples of the configured bytes-per-checksum (HDFS requirement).
    {
        Range<Long> blockSize = null;
        try {
            blockSize = extractor.getBlockSize(opts.getValue(ConfigOption.BLOCK_SIZE.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging block size range", e);
        }
        if (blockSize != null) {
            if (blockSize.getLower() > blockSize.getUpper()) {
                throw new ConfigException("Block size minimum is greater than its maximum");
            }
            if (blockSize.getLower() <= 0) {
                throw new ConfigException("Block size minimum must be greater than zero");
            }
            Long bytesPerChecksum = extractor.getByteCheckSum();
            if (bytesPerChecksum != null) {
                if ((blockSize.getLower() % bytesPerChecksum) != 0) {
                    throw new ConfigException("Blocksize lower bound must be a multiple of " + bytesPerChecksum);
                }
                if ((blockSize.getUpper() % bytesPerChecksum) != 0) {
                    throw new ConfigException("Blocksize upper bound must be a multiple of " + bytesPerChecksum);
                }
            }
            base.set(ConfigOption.BLOCK_SIZE.getCfgOption(), blockSize.toString());
        }
    }
    // Read-size range: non-negative (zero allowed, unlike the ranges above)
    // and ordered.
    {
        Range<Long> readSize = null;
        try {
            readSize = extractor.getReadSize(opts.getValue(ConfigOption.READ_SIZE.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging read size range", e);
        }
        if (readSize != null) {
            if (readSize.getLower() > readSize.getUpper()) {
                throw new ConfigException("Read size minimum is greater than its maximum");
            }
            if (readSize.getLower() < 0) {
                throw new ConfigException("Read size minimum must be greater than or equal to zero");
            }
            base.set(ConfigOption.READ_SIZE.getCfgOption(), readSize.toString());
        }
    }
    // Write-size range: non-negative and ordered.
    {
        Range<Long> writeSize = null;
        try {
            writeSize = extractor.getWriteSize(opts.getValue(ConfigOption.WRITE_SIZE.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging write size range", e);
        }
        if (writeSize != null) {
            if (writeSize.getLower() > writeSize.getUpper()) {
                throw new ConfigException("Write size minimum is greater than its maximum");
            }
            if (writeSize.getLower() < 0) {
                throw new ConfigException("Write size minimum must be greater than or equal to zero");
            }
            base.set(ConfigOption.WRITE_SIZE.getCfgOption(), writeSize.toString());
        }
    }
    // Append-size range: non-negative and ordered.
    {
        Range<Long> appendSize = null;
        try {
            appendSize = extractor.getAppendSize(opts.getValue(ConfigOption.APPEND_SIZE.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging append size range", e);
        }
        if (appendSize != null) {
            if (appendSize.getLower() > appendSize.getUpper()) {
                throw new ConfigException("Append size minimum is greater than its maximum");
            }
            if (appendSize.getLower() < 0) {
                throw new ConfigException("Append size minimum must be greater than or equal to zero");
            }
            base.set(ConfigOption.APPEND_SIZE.getCfgOption(), appendSize.toString());
        }
    }
    // Truncate-size range: non-negative and ordered.
    {
        Range<Long> truncateSize = null;
        try {
            truncateSize = extractor.getTruncateSize(opts.getValue(ConfigOption.TRUNCATE_SIZE.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging truncate size range", e);
        }
        if (truncateSize != null) {
            if (truncateSize.getLower() > truncateSize.getUpper()) {
                throw new ConfigException("Truncate size minimum is greater than its maximum");
            }
            if (truncateSize.getLower() < 0) {
                throw new ConfigException("Truncate size minimum must be greater than or equal to zero");
            }
            base.set(ConfigOption.TRUNCATE_SIZE.getCfgOption(), truncateSize.toString());
        }
    }
    // Random seed: optional, any long accepted.
    {
        Long seed = null;
        try {
            seed = extractor.getRandomSeed(opts.getValue(ConfigOption.RANDOM_SEED.getOpt()));
        } catch (Exception e) {
            throw new ConfigException("Error extracting & merging random number seed", e);
        }
        if (seed != null) {
            base.set(ConfigOption.RANDOM_SEED.getCfgOption(), seed.toString());
        }
    }
    return base;
}
96941.9981250elasticsearch
/**
 * Round-trip test for {@code SlackMessage} XContent serialization: builds a
 * fully randomized message (every field may independently be absent),
 * serializes it via {@code toXContent}, re-parses the resulting JSON by hand
 * into the component fields, reconstructs a {@code SlackMessage}, and asserts
 * it equals the original.
 */
public void testToXContent() throws Exception {
    // --- Randomized inputs; null means "field omitted". ---
    String from = randomBoolean() ? null : randomAlphaOfLength(10);
    String[] to = rarely() ? null : new String[randomIntBetween(0, 2)];
    if (to != null) {
        for (int i = 0; i < to.length; i++) {
            to[i] = randomAlphaOfLength(10);
        }
    }
    String icon = randomBoolean() ? null : randomAlphaOfLength(10);
    String text = randomBoolean() ? null : randomAlphaOfLength(50);
    // Attachments may only be dropped when text is present, so the message
    // always carries some content.
    Attachment[] attachments = (text != null && randomBoolean()) ? null : new Attachment[randomIntBetween(0, 2)];
    if (attachments != null) {
        for (int i = 0; i < attachments.length; i++) {
            String fallback = randomBoolean() ? null : randomAlphaOfLength(10);
            String color = randomBoolean() ? null : randomAlphaOfLength(10);
            String pretext = randomBoolean() ? null : randomAlphaOfLength(10);
            String authorName = randomBoolean() ? null : randomAlphaOfLength(10);
            // Author link/icon only make sense when an author name exists.
            String authorLink = authorName == null || randomBoolean() ? null : randomAlphaOfLength(10);
            String authorIcon = authorName == null || randomBoolean() ? null : randomAlphaOfLength(10);
            String title = randomBoolean() ? null : randomAlphaOfLength(10);
            // Title link likewise requires a title.
            String titleLink = title == null || randomBoolean() ? null : randomAlphaOfLength(10);
            String attachmentText = randomBoolean() ? null : randomAlphaOfLength(10);
            Field[] fields = randomBoolean() ? null : new Field[randomIntBetween(0, 2)];
            if (fields != null) {
                for (int j = 0; j < fields.length; j++) {
                    fields[j] = new Field(randomAlphaOfLength(10), randomAlphaOfLength(10), randomBoolean());
                }
            }
            String imageUrl = randomBoolean() ? null : randomAlphaOfLength(10);
            String thumbUrl = randomBoolean() ? null : randomAlphaOfLength(10);
            String[] markdownFields = randomBoolean() ? null : new String[] { "pretext" };
            List<Action> actions = new ArrayList<>();
            if (randomBoolean()) {
                actions.add(new Action("primary", "action_name", "button", "action_text", "https://elastic.co"));
            }
            attachments[i] = new Attachment(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, attachmentText, fields, imageUrl, thumbUrl, markdownFields, actions);
        }
    }
    // The message we expect to get back after the serialize/parse round trip.
    SlackMessage expected = new SlackMessage(from, to, icon, text, attachments);
    // Whether the "to" field should be included in the serialized output.
    boolean includeTarget = randomBoolean();
    // NOTE(review): the builder constructed in this section mirrors the
    // expected JSON by hand but is discarded when `builder` is reassigned
    // below — it appears to be unused; confirm whether it can be removed.
    XContentBuilder builder = jsonBuilder();
    builder.startObject();
    writeFieldIfNotNull(builder, "from", from);
    if (includeTarget) {
        writeFieldIfNotNull(builder, "to", to);
    }
    writeFieldIfNotNull(builder, "icon", icon);
    writeFieldIfNotNull(builder, "text", text);
    if (attachments != null) {
        builder.startArray("attachments");
        for (Attachment attachment : attachments) {
            builder.startObject();
            writeFieldIfNotNull(builder, "fallback", attachment.fallback);
            writeFieldIfNotNull(builder, "color", attachment.color);
            writeFieldIfNotNull(builder, "pretext", attachment.pretext);
            writeFieldIfNotNull(builder, "author_name", attachment.authorName);
            writeFieldIfNotNull(builder, "author_link", attachment.authorLink);
            writeFieldIfNotNull(builder, "author_icon", attachment.authorIcon);
            writeFieldIfNotNull(builder, "title", attachment.title);
            writeFieldIfNotNull(builder, "title_link", attachment.titleLink);
            writeFieldIfNotNull(builder, "text", attachment.text);
            if (attachment.fields != null) {
                builder.startArray("fields");
                for (Field field : attachment.fields) {
                    builder.startObject();
                    builder.field("title", field.title);
                    builder.field("value", field.value);
                    builder.field("short", field.isShort);
                    builder.endObject();
                }
                builder.endArray();
            }
            if (attachment.actions.isEmpty() == false) {
                builder.startArray("actions");
                for (Action action : attachment.actions) {
                    action.toXContent(builder, ToXContent.EMPTY_PARAMS);
                }
                builder.endArray();
            }
            builder.endObject();
        }
        builder.endArray();
    }
    builder.endObject();
    // --- Serialize via the production toXContent path (fresh builder). ---
    builder = jsonBuilder();
    if (includeTarget && randomBoolean()) {
        // Two-arg overload; presumably includes "to" by default — it is only
        // chosen when includeTarget is true.
        expected.toXContent(builder, ToXContent.EMPTY_PARAMS);
    } else {
        expected.toXContent(builder, ToXContent.EMPTY_PARAMS, includeTarget);
    }
    // --- Re-parse the JSON by hand back into the component fields. ---
    XContentParser parser = createParser(builder);
    parser.nextToken();
    // Reuse the original locals as parse targets; reset them first.
    from = null;
    to = null;
    icon = null;
    text = null;
    attachments = null;
    // Tracks the most recent FIELD_NAME token; shared across all nesting
    // levels of the pull-parse loops below.
    String currentFieldName = null;
    XContentParser.Token token = parser.currentToken();
    assertThat(token, is(XContentParser.Token.START_OBJECT));
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if ("from".equals(currentFieldName)) {
            from = parser.text();
        } else if ("to".equals(currentFieldName)) {
            List<String> list = new ArrayList<>();
            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                list.add(parser.text());
            }
            to = list.toArray(new String[list.size()]);
        } else if ("icon".equals(currentFieldName)) {
            icon = parser.text();
        } else if ("text".equals(currentFieldName)) {
            text = parser.text();
        } else if ("attachments".equals(currentFieldName)) {
            List<Attachment> list = new ArrayList<>();
            // One iteration per attachment object in the array.
            while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                assertThat(token, is(XContentParser.Token.START_OBJECT));
                String fallback = null;
                String color = null;
                String pretext = null;
                String authorName = null;
                String authorLink = null;
                String authorIcon = null;
                String title = null;
                String titleLink = null;
                String attachmentText = null;
                Field[] fields = null;
                String imageUrl = null;
                String thumbUrl = null;
                String[] markdownSupportedFields = null;
                List<Action> actions = new ArrayList<>();
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if ("fallback".equals(currentFieldName)) {
                        fallback = parser.text();
                    } else if ("color".equals(currentFieldName)) {
                        color = parser.text();
                    } else if ("pretext".equals(currentFieldName)) {
                        pretext = parser.text();
                    } else if ("author_name".equals(currentFieldName)) {
                        authorName = parser.text();
                    } else if ("author_link".equals(currentFieldName)) {
                        authorLink = parser.text();
                    } else if ("author_icon".equals(currentFieldName)) {
                        authorIcon = parser.text();
                    } else if ("title".equals(currentFieldName)) {
                        title = parser.text();
                    } else if ("title_link".equals(currentFieldName)) {
                        titleLink = parser.text();
                    } else if ("text".equals(currentFieldName)) {
                        attachmentText = parser.text();
                    } else if ("fields".equals(currentFieldName)) {
                        // Nested "fields" array of {title, value, short} objects.
                        List<Field> fieldList = new ArrayList<>();
                        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            assertThat(token, is(XContentParser.Token.START_OBJECT));
                            String fieldTitle = null;
                            String fieldValue = null;
                            boolean isShort = false;
                            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                                if (token == XContentParser.Token.FIELD_NAME) {
                                    currentFieldName = parser.currentName();
                                } else if ("title".equals(currentFieldName)) {
                                    fieldTitle = parser.text();
                                } else if ("value".equals(currentFieldName)) {
                                    fieldValue = parser.text();
                                } else if ("short".equals(currentFieldName)) {
                                    isShort = parser.booleanValue();
                                }
                            }
                            fieldList.add(new Field(fieldTitle, fieldValue, isShort));
                        }
                        fields = fieldList.toArray(new Field[fieldList.size()]);
                    } else if ("actions".equals(currentFieldName)) {
                        // Actions round-trip through the Template + render path
                        // rather than a direct constructor.
                        MockTextTemplateEngine engine = new MockTextTemplateEngine();
                        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            Action.Template action = new Action.Template();
                            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                                if (token == XContentParser.Token.FIELD_NAME) {
                                    currentFieldName = parser.currentName();
                                } else if (token.isValue()) {
                                    switch(currentFieldName) {
                                        case "url" ->
                                            action.setUrl(new TextTemplate(parser.text()));
                                        case "name" ->
                                            action.setName(new TextTemplate(parser.text()));
                                        case "style" ->
                                            action.setStyle(new TextTemplate(parser.text()));
                                        case "text" ->
                                            action.setText(new TextTemplate(parser.text()));
                                        case "type" ->
                                            action.setType(new TextTemplate(parser.text()));
                                    }
                                }
                            }
                            actions.add(action.render(engine, Collections.emptyMap()));
                        }
                    } else if ("image_url".equals(currentFieldName)) {
                        imageUrl = parser.text();
                    } else if ("thumb_url".equals(currentFieldName)) {
                        thumbUrl = parser.text();
                    } else if ("mrkdwn_in".equals(currentFieldName)) {
                        List<String> data = new ArrayList<>();
                        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            data.add(parser.text());
                        }
                        markdownSupportedFields = data.toArray(new String[] {});
                    }
                }
                list.add(new Attachment(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, attachmentText, fields, imageUrl, thumbUrl, markdownSupportedFields, actions));
            }
            attachments = list.toArray(new Attachment[list.size()]);
        }
    }
    // When "to" was not serialized it cannot round-trip: assert it parsed as
    // null, then substitute the expected value so the equality check below
    // compares the remaining fields.
    if (includeTarget == false) {
        assertThat(to, nullValue());
        to = expected.to;
    }
    SlackMessage actual = new SlackMessage(from, to, icon, text, attachments);
    assertThat(actual, equalTo(expected));
}
97995.0759350gwt
/**
 * Parses one JavaScript statement from the given token stream and returns the
 * parse-tree node produced by the node factory {@code nf}.
 *
 * <p>While parsing, the method also echoes a normalized token form of the
 * statement through {@code sourceAdd}/{@code sourceAddString}; braces and EOL
 * markers are inserted so that single-statement bodies (if/while/for/...) are
 * always emitted in braced form.
 *
 * <p>{@code skipsemi} is set by constructs that end in a block (if, switch,
 * while, for, try, with, bare blocks, and empty statements); for those, the
 * trailing SEMI/EOL pair is not appended at the end of the method.
 *
 * <p>NOTE(review): {@code ts.GWT} is matched in every position where a closing
 * parenthesis would normally be expected (after switch/for/with/catch heads) —
 * presumably this fork of TokenStream reuses that constant for ')'; confirm
 * against the TokenStream definition.
 *
 * @param ts the token stream to consume the statement from
 * @return the parse node for the statement (never detached from side effects
 *         on the echoed source buffer)
 * @throws IOException if reading the underlying stream fails
 * @throws JavaScriptException if the parser surfaces a JS-level error
 */
private Object statementHelper(TokenStream ts) throws IOException, JavaScriptException {
    Object pn = null;
    // True when the statement form ends in a block, so no trailing ';' is emitted.
    boolean skipsemi = false;
    int tt;
    // Remembers the first token of an expression statement for label detection below.
    int lastExprType = 0;
    tt = ts.getToken();
    switch(tt) {
        // if (cond) { ... } [else { ... }] — both branches echoed braced.
        case TokenStream.IF:
            {
                skipsemi = true;
                sourceAdd((char) ts.IF);
                int lineno = ts.getLineno();
                Object cond = condition(ts);
                sourceAdd((char) ts.LC);
                sourceAdd((char) ts.EOL);
                Object ifTrue = statement(ts);
                Object ifFalse = null;
                if (ts.matchToken(ts.ELSE)) {
                    sourceAdd((char) ts.RC);
                    sourceAdd((char) ts.ELSE);
                    sourceAdd((char) ts.LC);
                    sourceAdd((char) ts.EOL);
                    ifFalse = statement(ts);
                }
                sourceAdd((char) ts.RC);
                sourceAdd((char) ts.EOL);
                pn = nf.createIf(cond, ifTrue, ifFalse, lineno);
                break;
            }
        // switch (expr) { case ...: / default: ... } — each case's statements are
        // collected into a synthetic BLOCK node attached under the case node.
        case TokenStream.SWITCH:
            {
                skipsemi = true;
                sourceAdd((char) ts.SWITCH);
                pn = nf.createSwitch(ts.getLineno());
                Object cur_case = null;
                Object case_statements;
                mustMatchToken(ts, ts.LP, "msg.no.paren.switch");
                sourceAdd((char) ts.LP);
                nf.addChildToBack(pn, expr(ts, false));
                mustMatchToken(ts, ts.GWT, "msg.no.paren.after.switch");
                sourceAdd((char) ts.GWT);
                mustMatchToken(ts, ts.LC, "msg.no.brace.switch");
                sourceAdd((char) ts.LC);
                sourceAdd((char) ts.EOL);
                while ((tt = ts.getToken()) != ts.RC && tt != ts.EOF) {
                    switch(tt) {
                        case TokenStream.CASE:
                            sourceAdd((char) ts.CASE);
                            cur_case = nf.createUnary(ts.CASE, expr(ts, false));
                            // Colon is echoed here; the token itself is consumed
                            // by the mustMatchToken after this inner switch.
                            sourceAdd((char) ts.COLON);
                            sourceAdd((char) ts.EOL);
                            break;
                        case TokenStream.DEFAULT:
                            cur_case = nf.createLeaf(ts.DEFAULT);
                            sourceAdd((char) ts.DEFAULT);
                            sourceAdd((char) ts.COLON);
                            sourceAdd((char) ts.EOL);
                            break;
                        default:
                            reportError(ts, "msg.bad.switch");
                            break;
                    }
                    mustMatchToken(ts, ts.COLON, "msg.no.colon.case");
                    case_statements = nf.createLeaf(TokenStream.BLOCK);
                    ((Node) case_statements).setIsSyntheticBlock(true);
                    // Accumulate statements until the next case/default or end of switch.
                    while ((tt = ts.peekToken()) != ts.RC && tt != ts.CASE && tt != ts.DEFAULT && tt != ts.EOF) {
                        nf.addChildToBack(case_statements, statement(ts));
                    }
                    nf.addChildToBack(cur_case, case_statements);
                    nf.addChildToBack(pn, cur_case);
                }
                sourceAdd((char) ts.RC);
                sourceAdd((char) ts.EOL);
                break;
            }
        // while (cond) { body }
        case TokenStream.WHILE:
            {
                skipsemi = true;
                sourceAdd((char) ts.WHILE);
                int lineno = ts.getLineno();
                Object cond = condition(ts);
                sourceAdd((char) ts.LC);
                sourceAdd((char) ts.EOL);
                Object body = statement(ts);
                sourceAdd((char) ts.RC);
                sourceAdd((char) ts.EOL);
                pn = nf.createWhile(cond, body, lineno);
                break;
            }
        // do { body } while (cond) — note: skipsemi stays false, so the
        // terminating semicolon after the condition is still emitted.
        case TokenStream.DO:
            {
                sourceAdd((char) ts.DO);
                sourceAdd((char) ts.LC);
                sourceAdd((char) ts.EOL);
                int lineno = ts.getLineno();
                Object body = statement(ts);
                sourceAdd((char) ts.RC);
                mustMatchToken(ts, ts.WHILE, "msg.no.while.do");
                sourceAdd((char) ts.WHILE);
                Object cond = condition(ts);
                pn = nf.createDoWhile(body, cond, lineno);
                break;
            }
        // for (init; cond; incr) { body } and for (lhs in obj) { body }.
        // The for-in form is detected by leaving incr == null.
        case TokenStream.FOR:
            {
                skipsemi = true;
                sourceAdd((char) ts.FOR);
                int lineno = ts.getLineno();
                Object init;
                Object cond;
                Object incr = null;
                Object body;
                mustMatchToken(ts, ts.LP, "msg.no.paren.for");
                sourceAdd((char) ts.LP);
                tt = ts.peekToken();
                if (tt == ts.SEMI) {
                    // Empty init clause: for (; ...; ...)
                    init = nf.createLeaf(ts.VOID);
                } else {
                    if (tt == ts.VAR) {
                        // for (var x ...) — consume VAR and parse declarations.
                        ts.getToken();
                        init = variables(ts, true);
                    } else {
                        init = expr(ts, true);
                    }
                }
                tt = ts.peekToken();
                if (tt == ts.RELOP && ts.getOp() == ts.IN) {
                    // for-in: the "condition" slot holds the object expression.
                    ts.matchToken(ts.RELOP);
                    sourceAdd((char) ts.IN);
                    cond = expr(ts, false);
                } else {
                    // Classic three-clause for; empty cond/incr become VOID leaves.
                    mustMatchToken(ts, ts.SEMI, "msg.no.semi.for");
                    sourceAdd((char) ts.SEMI);
                    if (ts.peekToken() == ts.SEMI) {
                        cond = nf.createLeaf(ts.VOID);
                    } else {
                        cond = expr(ts, false);
                    }
                    mustMatchToken(ts, ts.SEMI, "msg.no.semi.for.cond");
                    sourceAdd((char) ts.SEMI);
                    if (ts.peekToken() == ts.GWT) {
                        incr = nf.createLeaf(ts.VOID);
                    } else {
                        incr = expr(ts, false);
                    }
                }
                mustMatchToken(ts, ts.GWT, "msg.no.paren.for.ctrl");
                sourceAdd((char) ts.GWT);
                sourceAdd((char) ts.LC);
                sourceAdd((char) ts.EOL);
                body = statement(ts);
                sourceAdd((char) ts.RC);
                sourceAdd((char) ts.EOL);
                if (incr == null) {
                    // incr is only null on the for-in path above.
                    pn = nf.createForIn(init, cond, body, lineno);
                } else {
                    pn = nf.createFor(init, cond, incr, body, lineno);
                }
                break;
            }
        // try { ... } catch (v [if cond]) { ... }* finally { ... } —
        // supports SpiderMonkey-style conditional catch clauses; an
        // unconditional catch must be the last one.
        case TokenStream.TRY:
            {
                int lineno = ts.getLineno();
                Object tryblock;
                Object catchblocks = null;
                Object finallyblock = null;
                skipsemi = true;
                sourceAdd((char) ts.TRY);
                sourceAdd((char) ts.LC);
                sourceAdd((char) ts.EOL);
                tryblock = statement(ts);
                sourceAdd((char) ts.RC);
                sourceAdd((char) ts.EOL);
                catchblocks = nf.createLeaf(TokenStream.BLOCK);
                boolean sawDefaultCatch = false;
                int peek = ts.peekToken();
                if (peek == ts.CATCH) {
                    while (ts.matchToken(ts.CATCH)) {
                        if (sawDefaultCatch) {
                            // A catch after an unconditional catch can never run.
                            reportError(ts, "msg.catch.unreachable");
                        }
                        sourceAdd((char) ts.CATCH);
                        mustMatchToken(ts, ts.LP, "msg.no.paren.catch");
                        sourceAdd((char) ts.LP);
                        mustMatchToken(ts, ts.NAME, "msg.bad.catchcond");
                        String varName = ts.getString();
                        sourceAddString(ts.NAME, varName);
                        Object catchCond = null;
                        if (ts.matchToken(ts.IF)) {
                            // Conditional catch: catch (e if <expr>)
                            sourceAdd((char) ts.IF);
                            catchCond = expr(ts, false);
                        } else {
                            sawDefaultCatch = true;
                        }
                        mustMatchToken(ts, ts.GWT, "msg.bad.catchcond");
                        sourceAdd((char) ts.GWT);
                        mustMatchToken(ts, ts.LC, "msg.no.brace.catchblock");
                        sourceAdd((char) ts.LC);
                        sourceAdd((char) ts.EOL);
                        nf.addChildToBack(catchblocks, nf.createCatch(varName, catchCond, statements(ts), ts.getLineno()));
                        mustMatchToken(ts, ts.RC, "msg.no.brace.after.body");
                        sourceAdd((char) ts.RC);
                        sourceAdd((char) ts.EOL);
                    }
                } else if (peek != ts.FINALLY) {
                    // Neither catch nor finally follows try — report via the
                    // (deliberately failing) finally match.
                    mustMatchToken(ts, ts.FINALLY, "msg.try.no.catchfinally");
                }
                if (ts.matchToken(ts.FINALLY)) {
                    sourceAdd((char) ts.FINALLY);
                    sourceAdd((char) ts.LC);
                    sourceAdd((char) ts.EOL);
                    finallyblock = statement(ts);
                    sourceAdd((char) ts.RC);
                    sourceAdd((char) ts.EOL);
                }
                pn = nf.createTryCatchFinally(tryblock, catchblocks, finallyblock, lineno);
                break;
            }
        // throw expr — must be terminated on the same line (no ASI across lines).
        case TokenStream.THROW:
            {
                int lineno = ts.getLineno();
                sourceAdd((char) ts.THROW);
                pn = nf.createThrow(expr(ts, false), lineno);
                if (lineno == ts.getLineno())
                    wellTerminated(ts, ts.ERROR);
                break;
            }
        // break [label]
        case TokenStream.BREAK:
            {
                int lineno = ts.getLineno();
                sourceAdd((char) ts.BREAK);
                String label = matchLabel(ts);
                if (label != null) {
                    sourceAddString(ts.NAME, label);
                }
                pn = nf.createBreak(label, lineno);
                break;
            }
        // continue [label]
        case TokenStream.CONTINUE:
            {
                int lineno = ts.getLineno();
                sourceAdd((char) ts.CONTINUE);
                String label = matchLabel(ts);
                if (label != null) {
                    sourceAddString(ts.NAME, label);
                }
                pn = nf.createContinue(label, lineno);
                break;
            }
        // debugger statement.
        case TokenStream.DEBUGGER:
            {
                int lineno = ts.getLineno();
                sourceAdd((char) ts.DEBUGGER);
                pn = nf.createDebugger(lineno);
                break;
            }
        // with (obj) { body } — rejected up front (unsupported in JSNI), but
        // still parsed through so error recovery can continue.
        case TokenStream.WITH:
            {
                reportError(ts, "msg.jsni.unsupported.with");
                skipsemi = true;
                sourceAdd((char) ts.WITH);
                int lineno = ts.getLineno();
                mustMatchToken(ts, ts.LP, "msg.no.paren.with");
                sourceAdd((char) ts.LP);
                Object obj = expr(ts, false);
                mustMatchToken(ts, ts.GWT, "msg.no.paren.after.with");
                sourceAdd((char) ts.GWT);
                sourceAdd((char) ts.LC);
                sourceAdd((char) ts.EOL);
                Object body = statement(ts);
                sourceAdd((char) ts.RC);
                sourceAdd((char) ts.EOL);
                pn = nf.createWith(obj, body, lineno);
                break;
            }
        // var declarations; same-line termination check as for throw.
        case TokenStream.VAR:
            {
                int lineno = ts.getLineno();
                pn = variables(ts, false);
                if (ts.getLineno() == lineno)
                    wellTerminated(ts, ts.ERROR);
                break;
            }
        // return [expr] — only legal inside a function; records whether the
        // function returns a value, void, or both via ts.flags.
        case TokenStream.RETURN:
            {
                Object retExpr = null;
                int lineno = 0;
                sourceAdd((char) ts.RETURN);
                if ((ts.flags & ts.TSF_FUNCTION) == 0)
                    reportError(ts, "msg.bad.return");
                // Allow a regexp literal to directly follow 'return'.
                ts.flags |= ts.TSF_REGEXP;
                tt = ts.peekTokenSameLine();
                ts.flags &= ~ts.TSF_REGEXP;
                if (tt != ts.EOF && tt != ts.EOL && tt != ts.SEMI && tt != ts.RC) {
                    lineno = ts.getLineno();
                    retExpr = expr(ts, false);
                    if (ts.getLineno() == lineno)
                        wellTerminated(ts, ts.ERROR);
                    ts.flags |= ts.TSF_RETURN_EXPR;
                } else {
                    ts.flags |= ts.TSF_RETURN_VOID;
                }
                pn = nf.createReturn(retExpr, lineno);
                break;
            }
        // Bare block statement: { ... }
        case TokenStream.LC:
            skipsemi = true;
            pn = statements(ts);
            mustMatchToken(ts, ts.RC, "msg.no.brace.block");
            break;
        // Empty statement (or error/EOL recovery) — becomes a VOID leaf.
        case TokenStream.ERROR:
        case TokenStream.EOL:
        case TokenStream.SEMI:
            pn = nf.createLeaf(ts.VOID);
            skipsemi = true;
            break;
        // Anything else: an expression statement, or a labeled statement if a
        // lone NAME is followed by ':'.
        default:
            {
                lastExprType = tt;
                int tokenno = ts.getTokenno();
                ts.ungetToken(tt);
                int lineno = ts.getLineno();
                pn = expr(ts, false);
                if (ts.peekToken() == ts.COLON) {
                    // Label only if the expression was exactly one NAME token.
                    if (lastExprType != ts.NAME || (ts.getTokenno() != tokenno))
                        reportError(ts, "msg.bad.label");
                    ts.getToken();
                    String name = ts.getString();
                    pn = nf.createLabel(name, lineno);
                    nf.addChildToBack(pn, statement(ts));
                    sourceAdd((char) ts.COLON);
                    sourceAdd((char) ts.EOL);
                    // Labeled statements bypass the shared SEMI handling below.
                    return pn;
                }
                if (lastExprType == ts.FUNCTION) {
                    if (nf.getLeafType(pn) != ts.FUNCTION) {
                        reportError(ts, "msg.syntax");
                    }
                }
                pn = nf.createExprStatement(pn, lineno);
                if (ts.getLineno() == lineno || (lastExprType == ts.FUNCTION && ts.getLineno() == lastExprEndLine)) {
                    wellTerminated(ts, lastExprType);
                }
                break;
            }
    }
    // Consume an optional trailing semicolon; echo it unless the statement
    // form already ended in a block.
    ts.matchToken(ts.SEMI);
    if (!skipsemi) {
        sourceAdd((char) ts.SEMI);
        sourceAdd((char) ts.EOL);
    }
    return pn;
}
109666.821675elasticsearch
/**
 * Builds the immutable registry of reserved (built-in) role descriptors,
 * keyed by role name.
 *
 * <p>Each entry pairs a role name with a {@code RoleDescriptor} spelling out
 * its cluster privileges, index privileges, Kibana application privileges,
 * reserved metadata, and a human-readable description. Deprecated roles carry
 * {@code MetadataUtils.getDeprecatedReservedMetadata(...)} pointing users at
 * the replacement role.
 *
 * <p>NOTE(review): several of the description string literals below appear to
 * contain raw line breaks mid-literal — this looks like an extraction/
 * formatting artifact of this file rather than intentional source; confirm
 * against the original before compiling.
 *
 * @return an unmodifiable map from reserved role name to its descriptor
 *         (via {@code Map.ofEntries}, which rejects null and duplicate keys)
 */
private static Map<String, RoleDescriptor> initializeReservedRoles() {
    return Map.ofEntries(entry("superuser", SUPERUSER_ROLE_DESCRIPTOR), entry("transport_client", new RoleDescriptor("transport_client", new String[] { "transport_client" }, null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants the privileges required to access the cluster through the Java Transport Client. " + "The Java Transport Client fetches information about the nodes in the cluster using " + "the Node Liveness API and the Cluster State API (when sniffing is enabled). " + "Assign your users this role if they use the Transport Client.")), entry("kibana_admin", kibanaAdminUser("kibana_admin", MetadataUtils.DEFAULT_RESERVED_METADATA)), entry("kibana_user", kibanaAdminUser("kibana_user", MetadataUtils.getDeprecatedReservedMetadata("Please use the [kibana_admin] role instead"))), entry("monitoring_user", new RoleDescriptor("monitoring_user", new String[] { "cluster:monitor/main", "cluster:monitor/xpack/info", TransportRemoteInfoAction.TYPE.name() }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices("/metrics-(beats|elasticsearch|enterprisesearch|kibana|logstash).*/").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metricbeat-*").privileges("read", "read_cross_cluster").build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder().application("kibana-*").resources("*").privileges("reserved_monitoring").build() }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, new RoleDescriptor.RemoteIndicesPrivileges[] { getRemoteIndicesReadPrivileges(".monitoring-*"), getRemoteIndicesReadPrivileges("/metrics-(beats|elasticsearch|enterprisesearch|kibana|logstash).*/"), getRemoteIndicesReadPrivileges("metricbeat-*") }, null, null, "Grants the minimum privileges 
required for any user of X-Pack monitoring other than those required to use Kibana. " + "This role grants access to the monitoring indices and grants privileges necessary " + "for reading basic cluster information. " + "This role also includes all Kibana privileges for the Elastic Stack monitoring features. " + "Monitoring users should also be assigned the kibana_admin role, " + "or another role with access to the Kibana instance.")), entry("remote_monitoring_agent", new RoleDescriptor("remote_monitoring_agent", new String[] { "manage_index_templates", "manage_ingest_pipelines", "monitor", GetLifecycleAction.NAME, ILMActions.PUT.name(), "cluster:monitor/xpack/watcher/watch/get", "cluster:admin/xpack/watcher/watch/put", "cluster:admin/xpack/watcher/watch/delete" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metricbeat-*").privileges("index", "create_index", "view_index_metadata", TransportIndicesAliasesAction.NAME, RolloverAction.NAME).build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants the minimum privileges required to write data into the monitoring indices (.monitoring-*). 
" + "This role also has the privileges necessary to create Metricbeat indices (metricbeat-*) " + "and write data into them.")), entry("remote_monitoring_collector", new RoleDescriptor("remote_monitoring_collector", new String[] { "monitor" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("monitor").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("read").allowRestrictedIndices(true).build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants the minimum privileges required to collect monitoring data for the Elastic Stack.")), entry("ingest_admin", new RoleDescriptor("ingest_admin", new String[] { "manage_index_templates", "manage_pipeline" }, null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access to manage all index templates and all ingest pipeline configurations.")), entry("reporting_user", new RoleDescriptor("reporting_user", null, null, null, null, null, MetadataUtils.getDeprecatedReservedMetadata("Please use Kibana feature privileges instead"), null, null, null, null, "Grants the specific privileges required for users of X-Pack reporting other than those required to use Kibana. " + "This role grants access to the reporting indices; each user has access to only their own reports. 
" + "Reporting users should also be assigned additional roles that grant access to Kibana as well as read access " + "to the indices that will be used to generate reports.")), entry(KibanaSystemUser.ROLE_NAME, kibanaSystemRoleDescriptor(KibanaSystemUser.ROLE_NAME)), entry("logstash_system", new RoleDescriptor("logstash_system", new String[] { "monitor", MonitoringBulkAction.NAME }, null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access necessary for the Logstash system user to send system-level data (such as monitoring) to Elasticsearch. " + "This role should not be assigned to users as the granted permissions may change between releases.")), entry("beats_admin", new RoleDescriptor("beats_admin", null, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".management-beats").privileges("all").build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access to the .management-beats index, which contains configuration information for the Beats.")), entry(UsernamesField.BEATS_ROLE, new RoleDescriptor(UsernamesField.BEATS_ROLE, new String[] { "monitor", MonitoringBulkAction.NAME }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-beats-*").privileges("create_index", "create").build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access necessary for the Beats system user to send system-level data (such as monitoring) to Elasticsearch. 
" + "This role should not be assigned to users as the granted permissions may change between releases.")), entry(UsernamesField.APM_ROLE, new RoleDescriptor(UsernamesField.APM_ROLE, new String[] { "monitor", MonitoringBulkAction.NAME }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-beats-*").privileges("create_index", "create_doc").build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access necessary for the APM system user to send system-level data (such as monitoring) to Elasticsearch.\n")), entry("apm_user", new RoleDescriptor("apm_user", null, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices("apm-*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-apm.*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-apm-*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-apm.*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-apm-*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("traces-apm.*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("traces-apm-*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("observability-annotations").privileges("read", "view_index_metadata").build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder().application("kibana-*").resources("*").privileges("reserved_ml_apm_user").build() }, null, null, 
MetadataUtils.getDeprecatedReservedMetadata("This role will be removed in a future major release. Please use editor and viewer roles instead"), null, null, null, null, "Grants the privileges required for APM users (such as read and view_index_metadata privileges " + "on the apm-* and .ml-anomalies* indices).")), entry("inference_admin", new RoleDescriptor("inference_admin", new String[] { "manage_inference" }, null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access necessary to manage inference models and performing inference.")), entry("inference_user", new RoleDescriptor("inference_user", new String[] { "monitor_inference" }, null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access necessary to perform inference.")), entry("machine_learning_user", new RoleDescriptor("machine_learning_user", new String[] { "monitor_ml" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*", ".ml-notifications*").privileges("view_index_metadata", "read").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".ml-annotations*").privileges("view_index_metadata", "read", "write").build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder().application("kibana-*").resources("*").privileges("reserved_ml_user").build() }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants the minimum privileges required to view machine learning configuration, status, and work with results. " + "This role grants monitor_ml cluster privileges, read access to the .ml-notifications and .ml-anomalies* indices " + "(which store machine learning results), and write access to .ml-annotations* indices. " + "Machine learning users also need index privileges for source and destination indices " + "and roles that grant access to Kibana. 
")), entry("machine_learning_admin", new RoleDescriptor("machine_learning_admin", new String[] { "manage_ml" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*", ".ml-notifications*", ".ml-state*", ".ml-meta*", ".ml-stats-*").allowRestrictedIndices(true).privileges("view_index_metadata", "read").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".ml-annotations*").privileges("view_index_metadata", "read", "write").build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder().application("kibana-*").resources("*").privileges("reserved_ml_admin").build() }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Provides all of the privileges of the machine_learning_user role plus the full use of the machine learning APIs. " + "Grants manage_ml cluster privileges, read access to .ml-anomalies*, .ml-notifications*, .ml-state*, " + ".ml-meta* indices and write access to .ml-annotations* indices. 
" + "Machine learning administrators also need index privileges for source and destination indices " + "and roles that grant access to Kibana.")), entry("data_frame_transforms_admin", new RoleDescriptor("data_frame_transforms_admin", new String[] { "manage_data_frame_transforms" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED, TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS).privileges("view_index_metadata", "read").build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder().application("kibana-*").resources("*").privileges("reserved_ml_user").build() }, null, null, MetadataUtils.getDeprecatedReservedMetadata("Please use the [transform_admin] role instead"), null, null, null, null, "Grants manage_data_frame_transforms cluster privileges, which enable you to manage transforms. " + "This role also includes all Kibana privileges for the machine learning features.")), entry("data_frame_transforms_user", new RoleDescriptor("data_frame_transforms_user", new String[] { "monitor_data_frame_transforms" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED, TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS).privileges("view_index_metadata", "read").build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder().application("kibana-*").resources("*").privileges("reserved_ml_user").build() }, null, null, MetadataUtils.getDeprecatedReservedMetadata("Please use the [transform_user] role instead"), null, null, null, null, "Grants monitor_data_frame_transforms cluster privileges, which enable you to use transforms. 
" + "This role also includes all Kibana privileges for the machine learning features. ")), entry("transform_admin", new RoleDescriptor("transform_admin", new String[] { "manage_transform" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED, TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS).privileges("view_index_metadata", "read").build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants manage_transform cluster privileges, which enable you to manage transforms. " + "This role also includes all Kibana privileges for the machine learning features.")), entry("transform_user", new RoleDescriptor("transform_user", new String[] { "monitor_transform" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(TransformInternalIndexConstants.AUDIT_INDEX_PATTERN, TransformInternalIndexConstants.AUDIT_INDEX_PATTERN_DEPRECATED, TransformInternalIndexConstants.AUDIT_INDEX_READ_ALIAS).privileges("view_index_metadata", "read").build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants monitor_transform cluster privileges, which enable you to perform read-only operations related to " + "transforms. This role also includes all Kibana privileges for the machine learning features.")), entry("watcher_admin", new RoleDescriptor("watcher_admin", new String[] { "manage_watcher" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, HistoryStoreField.INDEX_PREFIX + "*").privileges("read").allowRestrictedIndices(true).build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Allows users to create and execute all Watcher actions. " + "Grants read access to the .watches index. 
Also grants read access " + "to the watch history and the triggered watches index.")), entry("watcher_user", new RoleDescriptor("watcher_user", new String[] { "monitor_watcher" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX).privileges("read").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(HistoryStoreField.INDEX_PREFIX + "*").privileges("read").build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants read access to the .watches index, the get watch action and the watcher stats.")), entry("logstash_admin", new RoleDescriptor("logstash_admin", new String[] { "manage_logstash_pipelines" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".logstash*").privileges("create", "delete", "index", "manage", "read").allowRestrictedIndices(true).build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access to the .logstash* indices for managing configurations, " + "and grants necessary access for logstash-specific APIs exposed by the logstash x-pack plugin.")), entry("rollup_user", new RoleDescriptor("rollup_user", new String[] { "monitor_rollup" }, null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants monitor_rollup cluster privileges, which enable you to perform read-only operations related to rollups.")), entry("rollup_admin", new RoleDescriptor("rollup_admin", new String[] { "manage_rollup" }, null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants manage_rollup cluster privileges, which enable you to manage and execute all rollup actions.")), entry("snapshot_user", new RoleDescriptor("snapshot_user", new String[] { "create_snapshot", GetRepositoriesAction.NAME }, new RoleDescriptor.IndicesPrivileges[] { 
RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("view_index_metadata").allowRestrictedIndices(true).build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants the necessary privileges to create snapshots of all the indices and to view their metadata. " + "This role enables users to view the configuration of existing snapshot repositories and snapshot details. " + "It does not grant authority to remove or add repositories or to restore snapshots. " + "It also does not enable to change index settings or to read or update data stream or index data.")), entry("enrich_user", new RoleDescriptor("enrich_user", new String[] { "manage_enrich", "manage_ingest_pipelines", "monitor" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".enrich-*").privileges("read", "view_index_metadata").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".enrich-*").privileges("manage", "write").build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, null, null, null, "Grants access to manage all enrich indices (.enrich-*) and all operations on ingest pipelines.")), entry("viewer", buildViewerRoleDescriptor()), entry("editor", buildEditorRoleDescriptor()));
}
108139.6216544elasticsearch
/**
 * Downgrades {@code instance} so that it only carries fields the supplied (older)
 * {@link TransportVersion} can represent on the wire: every field introduced after
 * {@code version} is replaced with a null/zero default. Returns the instance
 * unchanged when the version already supports the full response.
 *
 * @param instance the fully-populated response to downgrade
 * @param version  the transport version of the receiving node
 * @return a rebuilt response with post-{@code version} fields defaulted, or
 *         {@code instance} itself when no downgrade is needed
 */
protected Response mutateInstanceForVersion(Response instance, TransportVersion version) {
    if (version.before(TransportVersions.V_8_0_0)) {
        // Pre-8.0: no model size stats and no deployment stats at all; per-pipeline
        // ingest byte stats are zeroed (ByteStats is gated on NODE_STATS_INGEST_BYTES below).
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), null, new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), null)).toList(), instance.getResources().count(), RESULTS_FIELD));
    } else if (version.before(TransportVersions.V_8_1_0)) {
        // Pre-8.1: deployment stats exist, but later node-level counters are defaulted
        // (error/rejected/timeout counts -> 0, cache stats -> null, threads/allocations
        // -> null, throughput -> 0/null); the deployment cache size is nulled, the
        // deployment id is aliased to the model id, and priority is forced to NORMAL.
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), stats.getModelSizeStats(), new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), stats.getDeploymentStats() == null ? null : new AssignmentStats(stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), stats.getDeploymentStats().getQueueCapacity(), null, stats.getDeploymentStats().getStartTime(), stats.getDeploymentStats().getNodeStats().stream().map(nodeStats -> new AssignmentStats.NodeStats(nodeStats.getNode(), nodeStats.getInferenceCount().orElse(null), nodeStats.getAvgInferenceTime().orElse(null), null, nodeStats.getLastAccess(), nodeStats.getPendingCount(), 0, null, 0, 0, nodeStats.getRoutingState(), nodeStats.getStartTime(), null, null, 0L, 0L, null, null)).toList(), Priority.NORMAL))).toList(), instance.getResources().count(), RESULTS_FIELD));
    } else if (version.before(TransportVersions.V_8_2_0)) {
        // Pre-8.2: error/rejected/timeout counts and per-node threads/allocations are
        // carried; throughput and cache statistics are still defaulted.
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), stats.getModelSizeStats(), new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), stats.getDeploymentStats() == null ? null : new AssignmentStats(stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), stats.getDeploymentStats().getQueueCapacity(), null, stats.getDeploymentStats().getStartTime(), stats.getDeploymentStats().getNodeStats().stream().map(nodeStats -> new AssignmentStats.NodeStats(nodeStats.getNode(), nodeStats.getInferenceCount().orElse(null), nodeStats.getAvgInferenceTime().orElse(null), null, nodeStats.getLastAccess(), nodeStats.getPendingCount(), nodeStats.getErrorCount(), null, nodeStats.getRejectedExecutionCount(), nodeStats.getTimeoutCount(), nodeStats.getRoutingState(), nodeStats.getStartTime(), nodeStats.getThreadsPerAllocation(), nodeStats.getNumberOfAllocations(), 0L, 0L, null, null)).toList(), Priority.NORMAL))).toList(), instance.getResources().count(), RESULTS_FIELD));
    } else if (version.before(TransportVersions.V_8_4_0)) {
        // Pre-8.4: throughput stats (peak / last-period / avg-inference-time-last-period)
        // are carried; cache size and cache hit counts are still null.
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), stats.getModelSizeStats(), new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), stats.getDeploymentStats() == null ? null : new AssignmentStats(stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), stats.getDeploymentStats().getQueueCapacity(), null, stats.getDeploymentStats().getStartTime(), stats.getDeploymentStats().getNodeStats().stream().map(nodeStats -> new AssignmentStats.NodeStats(nodeStats.getNode(), nodeStats.getInferenceCount().orElse(null), nodeStats.getAvgInferenceTime().orElse(null), null, nodeStats.getLastAccess(), nodeStats.getPendingCount(), nodeStats.getErrorCount(), null, nodeStats.getRejectedExecutionCount(), nodeStats.getTimeoutCount(), nodeStats.getRoutingState(), nodeStats.getStartTime(), nodeStats.getThreadsPerAllocation(), nodeStats.getNumberOfAllocations(), nodeStats.getPeakThroughput(), nodeStats.getThroughputLastPeriod(), nodeStats.getAvgInferenceTimeLastPeriod(), null)).toList(), Priority.NORMAL))).toList(), instance.getResources().count(), RESULTS_FIELD));
    } else if (version.before(TransportVersions.V_8_5_0)) {
        // Pre-8.5: cache size and cache hit counts are carried;
        // avg-inference-time-excluding-cache-hit is still null, priority still NORMAL.
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), stats.getModelSizeStats(), new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), stats.getDeploymentStats() == null ? null : new AssignmentStats(stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), stats.getDeploymentStats().getNodeStats().stream().map(nodeStats -> new AssignmentStats.NodeStats(nodeStats.getNode(), nodeStats.getInferenceCount().orElse(null), nodeStats.getAvgInferenceTime().orElse(null), null, nodeStats.getLastAccess(), nodeStats.getPendingCount(), nodeStats.getErrorCount(), nodeStats.getCacheHitCount().orElse(null), nodeStats.getRejectedExecutionCount(), nodeStats.getTimeoutCount(), nodeStats.getRoutingState(), nodeStats.getStartTime(), nodeStats.getThreadsPerAllocation(), nodeStats.getNumberOfAllocations(), nodeStats.getPeakThroughput(), nodeStats.getThroughputLastPeriod(), nodeStats.getAvgInferenceTimeLastPeriod(), nodeStats.getCacheHitCountLastPeriod().orElse(null))).toList(), Priority.NORMAL))).toList(), instance.getResources().count(), RESULTS_FIELD));
    } else if (version.before(TransportVersions.V_8_6_0)) {
        // Pre-8.6: avg-inference-time-excluding-cache-hit is carried; the priority
        // field does not exist yet, so it is forced to NORMAL.
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), stats.getModelSizeStats(), new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), stats.getDeploymentStats() == null ? null : new AssignmentStats(stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), stats.getDeploymentStats().getNodeStats().stream().map(nodeStats -> new AssignmentStats.NodeStats(nodeStats.getNode(), nodeStats.getInferenceCount().orElse(null), nodeStats.getAvgInferenceTime().orElse(null), nodeStats.getAvgInferenceTimeExcludingCacheHit().orElse(null), nodeStats.getLastAccess(), nodeStats.getPendingCount(), nodeStats.getErrorCount(), nodeStats.getCacheHitCount().orElse(null), nodeStats.getRejectedExecutionCount(), nodeStats.getTimeoutCount(), nodeStats.getRoutingState(), nodeStats.getStartTime(), nodeStats.getThreadsPerAllocation(), nodeStats.getNumberOfAllocations(), nodeStats.getPeakThroughput(), nodeStats.getThroughputLastPeriod(), nodeStats.getAvgInferenceTimeLastPeriod(), nodeStats.getCacheHitCountLastPeriod().orElse(null))).toList(), Priority.NORMAL))).toList(), instance.getResources().count(), RESULTS_FIELD));
    } else if (version.before(TransportVersions.V_8_8_0)) {
        // Pre-8.8: the real priority is carried, but a separate deployment id does not
        // exist yet, so the model id is used in its place.
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), stats.getModelSizeStats(), new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), stats.getDeploymentStats() == null ? null : new AssignmentStats(stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), stats.getDeploymentStats().getNodeStats().stream().map(nodeStats -> new AssignmentStats.NodeStats(nodeStats.getNode(), nodeStats.getInferenceCount().orElse(null), nodeStats.getAvgInferenceTime().orElse(null), nodeStats.getAvgInferenceTimeExcludingCacheHit().orElse(null), nodeStats.getLastAccess(), nodeStats.getPendingCount(), nodeStats.getErrorCount(), nodeStats.getCacheHitCount().orElse(null), nodeStats.getRejectedExecutionCount(), nodeStats.getTimeoutCount(), nodeStats.getRoutingState(), nodeStats.getStartTime(), nodeStats.getThreadsPerAllocation(), nodeStats.getNumberOfAllocations(), nodeStats.getPeakThroughput(), nodeStats.getThroughputLastPeriod(), nodeStats.getAvgInferenceTimeLastPeriod(), nodeStats.getCacheHitCountLastPeriod().orElse(null))).toList(), stats.getDeploymentStats().getPriority()))).toList(), instance.getResources().count(), RESULTS_FIELD));
    } else if (version.before(TransportVersions.NODE_STATS_INGEST_BYTES)) {
        // Everything is carried except the per-pipeline ingest ByteStats, which are
        // zeroed because they are not serialized before NODE_STATS_INGEST_BYTES.
        return new Response(new QueryPage<>(instance.getResources().results().stream().map(stats -> new Response.TrainedModelStats(stats.getModelId(), stats.getModelSizeStats(), new IngestStats(stats.getIngestStats().totalStats(), stats.getIngestStats().pipelineStats().stream().map(pipelineStat -> new IngestStats.PipelineStat(pipelineStat.pipelineId(), pipelineStat.stats(), new IngestStats.ByteStats(0, 0))).toList(), stats.getIngestStats().processorStats()), stats.getPipelineCount(), stats.getInferenceStats(), stats.getDeploymentStats() == null ? null : new AssignmentStats(stats.getDeploymentStats().getDeploymentId(), stats.getDeploymentStats().getModelId(), stats.getDeploymentStats().getThreadsPerAllocation(), stats.getDeploymentStats().getNumberOfAllocations(), stats.getDeploymentStats().getQueueCapacity(), stats.getDeploymentStats().getCacheSize(), stats.getDeploymentStats().getStartTime(), stats.getDeploymentStats().getNodeStats().stream().map(nodeStats -> new AssignmentStats.NodeStats(nodeStats.getNode(), nodeStats.getInferenceCount().orElse(null), nodeStats.getAvgInferenceTime().orElse(null), nodeStats.getAvgInferenceTimeExcludingCacheHit().orElse(null), nodeStats.getLastAccess(), nodeStats.getPendingCount(), nodeStats.getErrorCount(), nodeStats.getCacheHitCount().orElse(null), nodeStats.getRejectedExecutionCount(), nodeStats.getTimeoutCount(), nodeStats.getRoutingState(), nodeStats.getStartTime(), nodeStats.getThreadsPerAllocation(), nodeStats.getNumberOfAllocations(), nodeStats.getPeakThroughput(), nodeStats.getThroughputLastPeriod(), nodeStats.getAvgInferenceTimeLastPeriod(), nodeStats.getCacheHitCountLastPeriod().orElse(null))).toList(), stats.getDeploymentStats().getPriority()))).toList(), instance.getResources().count(), RESULTS_FIELD));
    }
    // The receiving version understands the full response; nothing to mutate.
    return instance;
}
109448.5560300hadoop
/**
 * Submits the distributed-shell ApplicationMaster to YARN and blocks while
 * monitoring the submitted application.
 * <p>
 * The steps are: log cluster/queue diagnostics, validate requested resource
 * profiles, create the application and clamp AM resources to cluster maximums,
 * stage the AM jar / user files / shell script in HDFS, build the AM launch
 * command and environment, attach security tokens, then submit and monitor.
 *
 * @return the result of {@code monitorApplication(applicationId)}, i.e. whether
 *         the application completed successfully
 * @throws IOException   on HDFS/staging failures, missing resource profiles, or
 *                       missing RM principal in secure mode
 * @throws YarnException on YARN client or submission failures
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    isRunning.set(true);
    yarnClient.start();
    clientStartTime = System.currentTimeMillis();
    // --- Informational logging: cluster metrics, running nodes, queue state and ACLs ---
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());
    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress=" + node.getHttpAddress() + ", nodeRackName=" + node.getRackName() + ", nodeNumContainers=" + node.getNumContainers());
    }
    // Fail fast if the target queue is not present in the scheduler configuration.
    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    if (queueInfo == null) {
        throw new IllegalArgumentException(String.format("Queue %s not present in scheduler configuration.", this.amQueue));
    }
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl=" + userAcl.name());
        }
    }
    // Optionally create a timeline domain for this application's timeline data.
    if (domainId != null && domainId.length() > 0 && toCreateDomain) {
        prepareTimelineDomain();
    }
    // Resource profiles may be disabled cluster-side; treat that as "no profiles".
    Map<String, Resource> profiles;
    try {
        profiles = yarnClient.getResourceProfiles();
    } catch (YARNFeatureNotEnabledException re) {
        profiles = null;
    }
    // Any profile actually requested (for the AM or for containers) must exist in
    // the cluster; otherwise submission is aborted with an IOException.
    List<String> appProfiles = new ArrayList<>(2);
    appProfiles.add(amResourceProfile);
    appProfiles.add(containerResourceProfile);
    for (String appProfile : appProfiles) {
        if (appProfile != null && !appProfile.isEmpty()) {
            if (profiles == null) {
                String message = "Resource profiles is not enabled";
                LOG.error(message);
                throw new IOException(message);
            }
            if (!profiles.containsKey(appProfile)) {
                String message = "Unknown resource profile '" + appProfile + "'. Valid resource profiles are " + profiles.keySet();
                LOG.error(message);
                throw new IOException(message);
            }
        }
    }
    // Create the application and clamp requested AM memory/vcores to the cluster maximums.
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    long maxMem = appResponse.getMaximumResourceCapability().getMemorySize();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }
    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);
    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value." + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }
    // Populate the submission context: resources, name, attempt policy and tags.
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    applicationId = appContext.getApplicationId();
    List<ResourceTypeInfo> resourceTypes = yarnClient.getResourceTypeInfo();
    setAMResourceCapability(appContext, profiles, resourceTypes);
    setContainerResources(profiles, resourceTypes);
    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);
    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }
    // Application tags: user-supplied tags plus timeline flow tags when configured.
    Set<String> tags = new HashSet<String>();
    if (applicationTags != null) {
        tags.addAll(applicationTags);
    }
    if (flowName != null) {
        tags.add(TimelineUtils.generateFlowNameTag(flowName));
    }
    if (flowVersion != null) {
        tags.add(TimelineUtils.generateFlowVersionTag(flowVersion));
    }
    // A flowRunId of 0 is treated as "not set".
    if (flowRunId != 0) {
        tags.add(TimelineUtils.generateFlowRunIdTag(flowRunId));
    }
    appContext.setApplicationTags(tags);
    // --- Staging: localize the AM jar (and optional log4j config) via HDFS ---
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, applicationId.toString(), localResources, null);
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, applicationId.toString(), localResources, null);
    }
    // Upload user-supplied files after validating them; checked IOExceptions are
    // wrapped in UncheckedIOException because the Consumer lambda cannot throw them.
    // File names are collected comma-separated for the --localized_files AM argument.
    StringBuilder localizableFiles = new StringBuilder();
    filesToLocalize.stream().forEach(path -> {
        File f = new File(path);
        if (!f.exists()) {
            throw new UncheckedIOException(new IOException(path + " does not exist"));
        }
        if (!f.canRead()) {
            throw new UncheckedIOException(new IOException(path + " cannot be read"));
        }
        if (f.isDirectory()) {
            throw new UncheckedIOException(new IOException(path + " is a directory"));
        }
        try {
            String fileName = f.getName();
            uploadFile(fs, path, fileName, applicationId.toString());
            if (localizableFiles.length() == 0) {
                localizableFiles.append(fileName);
            } else {
                localizableFiles.append(",").append(fileName);
            }
        } catch (IOException e) {
            throw new UncheckedIOException("Cannot upload file: " + path, e);
        }
    });
    // The shell script is copied straight to HDFS (not a LocalResource here); its
    // location, length and timestamp are handed to the AM via the environment below.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = ApplicationMaster.getRelativePath(appName, applicationId.toString(), SCRIPT_PATH);
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }
    // Inline shell command / args are written to HDFS-backed local resources.
    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, applicationId.toString(), localResources, shellCommand);
    }
    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, applicationId.toString(), localResources, StringUtils.join(shellArgs, " "));
    }
    // --- Environment for the AM: script coordinates, timeline domain, classpath ---
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));
    if (domainId != null && domainId.length() > 0) {
        env.put(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN, domainId);
    }
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$()).append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH, YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");
    // In a mini YARN cluster (tests) the framework jars live on the current JVM's classpath.
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    // --- Build the AM launch command: JVM invocation, main class, pass-through options,
    // with stdout/stderr redirected into the container log directory ---
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);
    LOG.info("Setting up app master command");
    vargs.add("\"" + Environment.JAVA_HOME.$$() + "/bin/java\"");
    vargs.add("-Xmx" + amMemory + "m");
    vargs.add(ApplicationConstants.JVM_ADD_OPENS_VAR);
    vargs.add(appMasterMainClass);
    if (containerType != null) {
        vargs.add("--container_type " + String.valueOf(containerType));
    }
    if (autoPromoteContainers) {
        vargs.add("--promote_opportunistic_after_start");
    }
    if (enforceExecType) {
        vargs.add("--enforce_execution_type");
    }
    if (containerMemory > 0) {
        vargs.add("--container_memory " + String.valueOf(containerMemory));
    }
    if (containerVirtualCores > 0) {
        vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    }
    if (!containerResources.isEmpty()) {
        Joiner.MapJoiner joiner = Joiner.on(',').withKeyValueSeparator("=");
        vargs.add("--container_resources " + joiner.join(containerResources));
    }
    if (containerResourceProfile != null && !containerResourceProfile.isEmpty()) {
        vargs.add("--container_resource_profile " + containerResourceProfile);
    }
    vargs.add("--num_containers " + String.valueOf(numContainers));
    // The placement spec is Base64-encoded so it survives command-line transport intact.
    if (placementSpec != null && placementSpec.length() > 0) {
        String encodedSpec = Base64.getEncoder().encodeToString(placementSpec.getBytes(StandardCharsets.UTF_8));
        LOG.info("Encode placement spec: " + encodedSpec);
        vargs.add("--placement_spec " + encodedSpec);
    }
    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    if (keepContainers) {
        vargs.add("--keep_containers_across_application_attempts");
    }
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }
    if (localizableFiles.length() > 0) {
        vargs.add("--localized_files " + localizableFiles.toString());
    }
    vargs.add("--appname " + appName);
    vargs.add("--homedir " + fs.getHomeDirectory());
    vargs.addAll(containerRetryOptions);
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null, null, null);
    // --- Security: in secure mode fetch HDFS delegation tokens, renewable by the RM principal ---
    Credentials rmCredentials = null;
    if (UserGroupInformation.isSecurityEnabled()) {
        rmCredentials = new Credentials();
        String tokenRenewer = YarnClientUtils.getRmPrincipal(conf);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, rmCredentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
    }
    // Optional docker registry credentials read from the client config file.
    Credentials dockerCredentials = null;
    if (dockerClientConfig != null) {
        dockerCredentials = DockerClientConfigHandler.readCredentialsFromConfigFile(new Path(dockerClientConfig), conf, applicationId.toString());
    }
    // Serialize whichever credentials exist into the AM container's token buffer.
    if (rmCredentials != null || dockerCredentials != null) {
        DataOutputBuffer dob = new DataOutputBuffer();
        if (rmCredentials != null) {
            rmCredentials.writeTokenStorageToStream(dob);
        }
        if (dockerCredentials != null) {
            dockerCredentials.writeTokenStorageToStream(dob);
        }
        ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(tokens);
    }
    appContext.setAMContainerSpec(amContainer);
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);
    appContext.setQueue(amQueue);
    specifyLogAggregationContext(appContext);
    // Submit, then block in monitorApplication until the app reaches a terminal state.
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);
    return monitorApplication(applicationId);
}
116989.1255299elasticsearch
public void parse(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Boolean defaultRequireAlias, @Nullable Boolean defaultRequireDataStream, @Nullable Boolean defaultListExecutedPipelines, boolean allowExplicitIndex, XContentType xContentType, BiConsumer<IndexRequest, String> indexRequestConsumer, Consumer<UpdateRequest> updateRequestConsumer, Consumer<DeleteRequest> deleteRequestConsumer) throws IOException {
    XContent xContent = xContentType.xContent();
    int line = 0;
    int from = 0;
    byte marker = xContent.streamSeparator();
    final Map<String, String> stringDeduplicator = new HashMap<>();
    boolean typesDeprecationLogged = false;
    while (true) {
        int nextMarker = findNextMarker(marker, from, data);
        if (nextMarker == -1) {
            break;
        }
        line++;
        try (XContentParser parser = createParser(xContent, data, from, nextMarker)) {
            from = nextMarker + 1;
            XContentParser.Token token = parser.nextToken();
            if (token == null) {
                continue;
            }
            if (token != XContentParser.Token.START_OBJECT) {
                throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + XContentParser.Token.START_OBJECT + " but found [" + token + "]");
            }
            token = parser.nextToken();
            if (token != XContentParser.Token.FIELD_NAME) {
                throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + XContentParser.Token.FIELD_NAME + " but found [" + token + "]");
            }
            String action = parser.currentName();
            if (SUPPORTED_ACTIONS.contains(action) == false) {
                throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected field [create], [delete], [index] or [update] but found [" + action + "]");
            }
            String index = defaultIndex;
            String type = null;
            String id = null;
            String routing = defaultRouting;
            FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
            String opType = null;
            long version = Versions.MATCH_ANY;
            VersionType versionType = VersionType.INTERNAL;
            long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
            long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
            int retryOnConflict = 0;
            String pipeline = defaultPipeline;
            boolean requireAlias = defaultRequireAlias != null && defaultRequireAlias;
            boolean requireDataStream = defaultRequireDataStream != null && defaultRequireDataStream;
            boolean listExecutedPipelines = defaultListExecutedPipelines != null && defaultListExecutedPipelines;
            Map<String, String> dynamicTemplates = Map.of();
            token = parser.nextToken();
            if (token == XContentParser.Token.START_OBJECT) {
                String currentFieldName = null;
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token.isValue()) {
                        if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) {
                            if (allowExplicitIndex == false) {
                                throw new IllegalArgumentException("explicit index in bulk is not allowed");
                            }
                            index = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity());
                        } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) {
                            if (parser.getRestApiVersion().matches(RestApiVersion.equalTo(RestApiVersion.V_7))) {
                                if (deprecateOrErrorOnType && typesDeprecationLogged == false) {
                                    deprecationLogger.compatibleCritical("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE);
                                    typesDeprecationLogged = true;
                                }
                            } else if (parser.getRestApiVersion().matches(RestApiVersion.onOrAfter(RestApiVersion.V_8)) && deprecateOrErrorOnType) {
                                throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
                            }
                            type = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity());
                        } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) {
                            id = parser.text();
                        } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) {
                            routing = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity());
                        } else if (OP_TYPE.match(currentFieldName, parser.getDeprecationHandler())) {
                            opType = parser.text();
                        } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) {
                            version = parser.longValue();
                        } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) {
                            versionType = VersionType.fromString(parser.text());
                        } else if (IF_SEQ_NO.match(currentFieldName, parser.getDeprecationHandler())) {
                            ifSeqNo = parser.longValue();
                        } else if (IF_PRIMARY_TERM.match(currentFieldName, parser.getDeprecationHandler())) {
                            ifPrimaryTerm = parser.longValue();
                        } else if (RETRY_ON_CONFLICT.match(currentFieldName, parser.getDeprecationHandler())) {
                            retryOnConflict = parser.intValue();
                        } else if (PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) {
                            pipeline = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity());
                        } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) {
                            fetchSourceContext = FetchSourceContext.fromXContent(parser);
                        } else if (REQUIRE_ALIAS.match(currentFieldName, parser.getDeprecationHandler())) {
                            requireAlias = parser.booleanValue();
                        } else if (REQUIRE_DATA_STREAM.match(currentFieldName, parser.getDeprecationHandler())) {
                            requireDataStream = parser.booleanValue();
                        } else if (LIST_EXECUTED_PIPELINES.match(currentFieldName, parser.getDeprecationHandler())) {
                            listExecutedPipelines = parser.booleanValue();
                        } else {
                            throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
                        }
                    } else if (token == XContentParser.Token.START_ARRAY) {
                        throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                    } else if (token == XContentParser.Token.START_OBJECT && DYNAMIC_TEMPLATES.match(currentFieldName, parser.getDeprecationHandler())) {
                        dynamicTemplates = parser.mapStrings();
                    } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, parser.getDeprecationHandler())) {
                        fetchSourceContext = FetchSourceContext.fromXContent(parser);
                    } else if (token != XContentParser.Token.VALUE_NULL) {
                        throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                    }
                }
            } else if (token != XContentParser.Token.END_OBJECT) {
                throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + XContentParser.Token.START_OBJECT + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]");
            }
            checkBulkActionIsProperlyClosed(parser);
            if ("delete".equals(action)) {
                if (dynamicTemplates.isEmpty() == false) {
                    throw new IllegalArgumentException("Delete request in line [" + line + "] does not accept " + DYNAMIC_TEMPLATES.getPreferredName());
                }
                deleteRequestConsumer.accept(new DeleteRequest(index).id(id).routing(routing).version(version).versionType(versionType).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm));
            } else {
                nextMarker = findNextMarker(marker, from, data);
                if (nextMarker == -1) {
                    break;
                }
                line++;
                if ("index".equals(action)) {
                    if (opType == null) {
                        indexRequestConsumer.accept(new IndexRequest(index).id(id).routing(routing).version(version).versionType(versionType).setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType).setDynamicTemplates(dynamicTemplates).setRequireAlias(requireAlias).setRequireDataStream(requireDataStream).setListExecutedPipelines(listExecutedPipelines), type);
                    } else {
                        indexRequestConsumer.accept(new IndexRequest(index).id(id).routing(routing).version(version).versionType(versionType).create("create".equals(opType)).setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType).setDynamicTemplates(dynamicTemplates).setRequireAlias(requireAlias).setRequireDataStream(requireDataStream).setListExecutedPipelines(listExecutedPipelines), type);
                    }
                } else if ("create".equals(action)) {
                    indexRequestConsumer.accept(new IndexRequest(index).id(id).routing(routing).version(version).versionType(versionType).create(true).setPipeline(pipeline).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType).setDynamicTemplates(dynamicTemplates).setRequireAlias(requireAlias).setRequireDataStream(requireDataStream).setListExecutedPipelines(listExecutedPipelines), type);
                } else if ("update".equals(action)) {
                    if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) {
                        throw new IllegalArgumentException("Update requests do not support versioning. " + "Please use `if_seq_no` and `if_primary_term` instead");
                    }
                    if (requireDataStream) {
                        throw new IllegalArgumentException("Update requests do not support the `require_data_stream` flag, " + "as data streams do not support update operations");
                    }
                    if (dynamicTemplates.isEmpty() == false) {
                        throw new IllegalArgumentException("Update request in line [" + line + "] does not accept " + DYNAMIC_TEMPLATES.getPreferredName());
                    }
                    UpdateRequest updateRequest = new UpdateRequest().index(index).id(id).routing(routing).retryOnConflict(retryOnConflict).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm).setRequireAlias(requireAlias).routing(routing);
                    try (XContentParser sliceParser = createParser(xContent, sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType))) {
                        updateRequest.fromXContent(sliceParser);
                    }
                    if (fetchSourceContext != null) {
                        updateRequest.fetchSource(fetchSourceContext);
                    }
                    IndexRequest upsertRequest = updateRequest.upsertRequest();
                    if (upsertRequest != null) {
                        upsertRequest.setPipeline(pipeline).setListExecutedPipelines(listExecutedPipelines);
                    }
                    updateRequestConsumer.accept(updateRequest);
                }
                from = nextMarker + 1;
            }
        }
    }
}
1110891.321551wildfly
/**
 * Builds the persistent XML parser description for this version of the
 * messaging-activemq subsystem schema ({@code NAMESPACE}).
 *
 * <p>The returned {@link PersistentResourceXMLDescription} drives both parsing
 * and marshalling of the subsystem configuration. NOTE(review): the order of
 * {@code addChild(...)} calls defines the element order the marshaller emits
 * and that the schema expects — do not reorder these calls casually.
 *
 * <p>A handful of builders ({@code jgroupDiscoveryGroup},
 * {@code socketDiscoveryGroup} and the four connector builders) are created
 * once and registered twice: once directly under the subsystem root (for
 * resources defined outside a server) and once under the {@code server}
 * element. Pooled connection factories are delegated to
 * {@code createPooledConnectionFactory(boolean)} — {@code true} for the
 * external (subsystem-level) variant, {@code false} for the server-level one.
 *
 * @return the XML description for the subsystem's persistent resources
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Discovery groups backed by a JGroups channel vs. a plain socket binding.
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    // Connector builders, reused at both subsystem and server level (see Javadoc).
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Single builder chain describing the whole subsystem tree:
    //   root attributes -> subsystem-level connectors/discovery groups
    //   -> external connection factory / pooled CF / external JMS destinations
    //   -> <server> (journal, HA policies, directories, security, addressing,
    //      acceptors, broadcast/discovery, clustering, bridges, JMS resources)
    //   -> <jms-bridge>.
    // The chain is kept as one expression because addChild ordering is the
    // schema's element ordering.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, 
ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, 
ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, 
ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, 
ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, 
SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, 
CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, 
GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, 
CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, 
LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, 
LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111054.971558wildfly
/**
 * Creates the {@link PersistentResourceXMLDescription} that drives XML parsing and
 * marshalling of the messaging-activemq subsystem configuration for the schema
 * version identified by {@code NAMESPACE}.
 *
 * <p>The description is one large fluent expression: the subsystem root (with its
 * global client thread-pool attributes) plus every child resource — connectors,
 * discovery groups, connection factories, the server resource with its HA policies,
 * directories, queues, security/address settings, acceptors, broadcast groups,
 * cluster connections, grouping handler, diverts, bridges, JMS destinations, legacy
 * connection factories and the JMS bridge — each registered with the exact set of
 * attributes persisted at this schema version.
 *
 * <p>NOTE(review): the order of attributes/children in this chain presumably mirrors
 * the order required by the corresponding XSD — confirm against the schema before
 * reordering anything.
 *
 * @return the complete XML description for this subsystem schema version
 */
public PersistentResourceXMLDescription getParserDescription() {
    // The builders below are created once and registered twice in the chain that
    // follows: once directly under the subsystem root and once again under the
    // server resource (both levels accept the same connector/discovery-group XML).
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Single fluent expression describing the whole subsystem tree; every attribute
    // constant listed here is persisted to/parsed from XML for this schema version.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, 
ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, 
ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS, ServerDefinition.NETWORK_CHECK_LIST, ServerDefinition.NETWORK_CHECK_NIC, ServerDefinition.NETWORK_CHECK_PERIOD, ServerDefinition.NETWORK_CHECK_PING6_COMMAND, ServerDefinition.NETWORK_CHECK_PING_COMMAND, ServerDefinition.NETWORK_CHECK_TIMEOUT, 
ServerDefinition.NETWORK_CHECK_URL_LIST).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, 
ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, 
CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, 
CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, 
ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, 
CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, 
ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, 
LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111160.721567wildfly
public PersistentResourceXMLDescription getParserDescription() {
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, 
ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, 
ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.JOURNAL_MAX_ATTIC_FILES, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS, ServerDefinition.NETWORK_CHECK_LIST, ServerDefinition.NETWORK_CHECK_NIC, ServerDefinition.NETWORK_CHECK_PERIOD, ServerDefinition.NETWORK_CHECK_PING6_COMMAND, 
ServerDefinition.NETWORK_CHECK_PING_COMMAND, ServerDefinition.NETWORK_CHECK_TIMEOUT, ServerDefinition.NETWORK_CHECK_URL_LIST, ServerDefinition.CRITICAL_ANALYZER_ENABLED, ServerDefinition.CRITICAL_ANALYZER_CHECK_PERIOD, ServerDefinition.CRITICAL_ANALYZER_POLICY, ServerDefinition.CRITICAL_ANALYZER_TIMEOUT).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, 
HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), 
PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, 
AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, 
ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME, BridgeDefinition.CALL_TIMEOUT)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, 
CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, 
ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, 
LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111182.911565wildfly
/**
 * Builds the {@code PersistentResourceXMLDescription} used to parse and persist this
 * messaging subsystem's XML configuration.
 *
 * <p>The description is assembled as a single fluent builder chain. NOTE(review): the
 * order of {@code addAttributes(...)} and {@code addChild(...)} calls appears to drive
 * the order of elements/attributes in the persisted XML — do not reorder without
 * checking against the subsystem schema.
 *
 * <p>Connector and discovery-group builders are created once up front because they are
 * registered twice: once at subsystem level and once under the {@code server} resource.
 *
 * @return the complete XML description for the subsystem rooted at
 *         {@code MessagingExtension.SUBSYSTEM_PATH} under {@code NAMESPACE}
 *         (namespace value declared elsewhere in this class).
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Discovery groups: JGroups-based and socket-binding-based variants.
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    // Connector variants: remote (socket-binding), HTTP, in-VM, and generic (factory-class).
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Subsystem root: global client pool attributes, subsystem-level connectors/discovery
    // groups, an external connection-factory, pooled connection factory, external JMS
    // destinations, then the embedded 'server' resource with its full attribute set.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, 
ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.ADDRESS_QUEUE_SCAN_PERIOD, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, 
// Server journal, disk/memory limits, counters, interceptors, and network-check settings.
ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.JOURNAL_MAX_ATTIC_FILES, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS, ServerDefinition.NETWORK_CHECK_LIST, ServerDefinition.NETWORK_CHECK_NIC, ServerDefinition.NETWORK_CHECK_PERIOD, ServerDefinition.NETWORK_CHECK_PING6_COMMAND, 
// HA policy children: live-only, replication (master/slave/colocated), shared-store
// (master/slave/colocated) — each with its scale-down / failback attributes.
ServerDefinition.NETWORK_CHECK_PING_COMMAND, ServerDefinition.NETWORK_CHECK_TIMEOUT, ServerDefinition.NETWORK_CHECK_URL_LIST, ServerDefinition.CRITICAL_ANALYZER_ENABLED, ServerDefinition.CRITICAL_ANALYZER_CHECK_PERIOD, ServerDefinition.CRITICAL_ANALYZER_POLICY, ServerDefinition.CRITICAL_ANALYZER_TIMEOUT).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, 
HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), 
// Server directories, queues, security settings, and address-setting children.
PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, 
// Server-level connectors/acceptors/broadcast groups — note the connector builders
// are reused here (same instances registered at subsystem level above).
AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, 
ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME, BridgeDefinition.CALL_TIMEOUT)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, 
// JMS destinations, server-scoped connection factory (note: server-scoped pooled
// connection factory is created with createPooledConnectionFactory(false), vs. true
// for the subsystem-level one above), legacy connection factory, then the JMS bridge.
CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, 
ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, 
LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1110950.761547wildfly
/**
 * Builds the {@code PersistentResourceXMLDescription} that maps this messaging subsystem
 * schema version ({@code NAMESPACE}) to the management model: which XML elements exist,
 * which attributes each carries, and how resources nest.
 * <p>
 * NOTE(review): the order of {@code addAttributes(...)} / {@code addChild(...)} calls
 * presumably defines the order in which attributes/elements are parsed and marshalled —
 * do not reorder entries without confirming against the XSD for this namespace.
 *
 * @return the XML description for this subsystem schema version
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Shared transport/discovery-group builders. Each of these is attached twice in the
    // chain below: once directly under the subsystem root (client-side configuration) and
    // once under the server resource.
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Single fluent chain describing the whole subsystem. Top level: root attributes,
    // the shared connector/discovery builders, external connection-factory resources
    // (note: this schema version uses DESERIALIZATION_BLOCKLIST/ALLOWLIST), then the
    // server resource with its nested HA, path, queue, security, address-setting,
    // acceptor, broadcast/discovery, cluster-connection, JMS destination and legacy
    // connection-factory children, and finally the JMS bridge resource.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, 
    ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLOCKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_ALLOWLIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.ADDRESS_QUEUE_SCAN_PERIOD, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, 
    ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.JOURNAL_MAX_ATTIC_FILES, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS, ServerDefinition.NETWORK_CHECK_LIST, 
    ServerDefinition.NETWORK_CHECK_NIC, ServerDefinition.NETWORK_CHECK_PERIOD, ServerDefinition.NETWORK_CHECK_PING6_COMMAND, ServerDefinition.NETWORK_CHECK_PING_COMMAND, ServerDefinition.NETWORK_CHECK_TIMEOUT, ServerDefinition.NETWORK_CHECK_URL_LIST, ServerDefinition.CRITICAL_ANALYZER_ENABLED, ServerDefinition.CRITICAL_ANALYZER_CHECK_PERIOD, ServerDefinition.CRITICAL_ANALYZER_POLICY, ServerDefinition.CRITICAL_ANALYZER_TIMEOUT).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_PRIMARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SECONDARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_PRIMARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, 
    HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SECONDARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.SHARED_STORE_PRIMARY_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(MessagingExtension.SHARED_STORE_SECONDARY_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(MessagingExtension.CONFIGURATION_PRIMARY_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(MessagingExtension.CONFIGURATION_SECONDARY_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), 
    PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, 
    AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_CREATED_QUEUES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, 
    ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.ATTRIBUTES)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, 
    ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLOCKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_ALLOWLIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, 
    ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, 
    LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111024.051551wildfly
public PersistentResourceXMLDescription getParserDescription() {
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, 
ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLOCKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_ALLOWLIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.ADDRESS_QUEUE_SCAN_PERIOD, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, 
ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.JOURNAL_MAX_ATTIC_FILES, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS, ServerDefinition.NETWORK_CHECK_LIST, 
ServerDefinition.NETWORK_CHECK_NIC, ServerDefinition.NETWORK_CHECK_PERIOD, ServerDefinition.NETWORK_CHECK_PING6_COMMAND, ServerDefinition.NETWORK_CHECK_PING_COMMAND, ServerDefinition.NETWORK_CHECK_TIMEOUT, ServerDefinition.NETWORK_CHECK_URL_LIST, ServerDefinition.CRITICAL_ANALYZER_ENABLED, ServerDefinition.CRITICAL_ANALYZER_CHECK_PERIOD, ServerDefinition.CRITICAL_ANALYZER_POLICY, ServerDefinition.CRITICAL_ANALYZER_TIMEOUT).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_PRIMARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SECONDARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_PRIMARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, 
HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SECONDARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.SHARED_STORE_PRIMARY_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(MessagingExtension.SHARED_STORE_SECONDARY_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(MessagingExtension.CONFIGURATION_PRIMARY_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(MessagingExtension.CONFIGURATION_SECONDARY_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), 
PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, 
AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_CREATED_QUEUES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, 
ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.ATTRIBUTES)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, 
ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLOCKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_ALLOWLIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, 
ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, 
LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111046.141552wildfly
/**
 * Builds the {@code PersistentResourceXMLDescription} that drives XML parsing and
 * marshalling for this version of the messaging-activemq subsystem schema.
 * <p>
 * Shared transport/discovery builders are created once up front because the same
 * connector and discovery-group elements are registered both at the subsystem root
 * (for external client use) and again under the {@code server} resource.
 * <p>
 * NOTE(review): the order in which attributes and children are registered defines the
 * order they are read/written in the XML — do not reorder entries casually; presumably
 * each reordering requires a corresponding schema/namespace change (verify against the
 * subsystem XSD for {@code NAMESPACE}).
 *
 * @return the complete XML description for the subsystem root and all child resources
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Discovery groups: one JGroups-based, one socket-binding-based variant.
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    // Connector variants: remote (socket-binding), HTTP-upgrade, in-VM, and generic (factory-class).
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Single fluent chain registering, in schema order: root attributes, root-level
    // connectors/discovery-groups and external connection factories / destinations,
    // then the full 'server' resource tree (HA policies, journal/paths, queues,
    // security/address settings, acceptors, broadcast/discovery, cluster connections,
    // diverts, bridges, JMS destinations and (legacy/pooled) connection factories),
    // and finally the jms-bridge resource.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, 
ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLOCKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_ALLOWLIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.ADDRESS_QUEUE_SCAN_PERIOD, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, 
ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.JOURNAL_MAX_ATTIC_FILES, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS, ServerDefinition.NETWORK_CHECK_LIST, 
ServerDefinition.NETWORK_CHECK_NIC, ServerDefinition.NETWORK_CHECK_PERIOD, ServerDefinition.NETWORK_CHECK_PING6_COMMAND, ServerDefinition.NETWORK_CHECK_PING_COMMAND, ServerDefinition.NETWORK_CHECK_TIMEOUT, ServerDefinition.NETWORK_CHECK_URL_LIST, ServerDefinition.CRITICAL_ANALYZER_ENABLED, ServerDefinition.CRITICAL_ANALYZER_CHECK_PERIOD, ServerDefinition.CRITICAL_ANALYZER_POLICY, ServerDefinition.CRITICAL_ANALYZER_TIMEOUT).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_PRIMARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SECONDARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_PRIMARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, 
HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SECONDARY_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.SHARED_STORE_PRIMARY_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(MessagingExtension.SHARED_STORE_SECONDARY_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(MessagingExtension.CONFIGURATION_PRIMARY_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(MessagingExtension.CONFIGURATION_SECONDARY_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), 
PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_READ_PAGE_BYTES, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, 
AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_CREATED_QUEUES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS, CommonAttributes.SSL_CONTEXT)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, 
CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.ATTRIBUTES)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, 
CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, 
ConnectionFactoryAttributes.Common.DESERIALIZATION_BLOCKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_ALLOWLIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, 
LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111100.031559wildfly
/**
 * Builds the persistent XML description used to parse and marshal one version of the
 * messaging-activemq subsystem schema (the version is fixed by the {@code NAMESPACE}
 * constant of the enclosing class — presumably one enum/instance per schema revision;
 * confirm against the surrounding file).
 *
 * <p>The attribute and child ordering below defines the XML parse/marshal order for this
 * schema version and must not be changed. Note this version still uses the
 * DESERIALIZATION_BLACKLIST/WHITELIST attributes; a newer sibling version uses the
 * BLOCKLIST/ALLOWLIST replacements instead.
 *
 * @return the XML description for this subsystem schema version
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Reusable child builders: each of these is attached more than once below —
    // once at the subsystem level (external client configuration) and once inside
    // the server element — so they are declared here instead of inline.
    final PersistentResourceXMLBuilder discoveryGroup = builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Pooled connection factory: Common.* attributes first, then Pooled.* ones.
    // Also reused both at the subsystem level and inside the server element.
    final PersistentResourceXMLBuilder pooledConnectionFactory = builder(MessagingExtension.POOLED_CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, 
ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Pooled.USE_JNDI, ConnectionFactoryAttributes.Pooled.JNDI_PARAMS, ConnectionFactoryAttributes.Pooled.REBALANCE_CONNECTIONS, ConnectionFactoryAttributes.Pooled.USE_LOCAL_TX, ConnectionFactoryAttributes.Pooled.SETUP_ATTEMPTS, ConnectionFactoryAttributes.Pooled.SETUP_INTERVAL, ConnectionFactoryAttributes.Pooled.ALLOW_LOCAL_TRANSACTIONS, ConnectionFactoryAttributes.Pooled.TRANSACTION, ConnectionFactoryAttributes.Pooled.USER, ConnectionFactoryAttributes.Pooled.PASSWORD, ConnectionFactoryAttributes.Pooled.CREDENTIAL_REFERENCE, ConnectionFactoryAttributes.Pooled.MIN_POOL_SIZE, ConnectionFactoryAttributes.Pooled.USE_AUTO_RECOVERY, ConnectionFactoryAttributes.Pooled.MAX_POOL_SIZE, ConnectionFactoryAttributes.Pooled.MANAGED_CONNECTION_POOL, ConnectionFactoryAttributes.Pooled.ENLISTMENT_TRACE, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Pooled.INITIAL_CONNECT_ATTEMPTS, ConnectionFactoryAttributes.Pooled.STATISTICS_ENABLED);
    // Single declarative expression describing the whole subsystem tree:
    // subsystem-level connectors / discovery-group / connection factories and
    // external destinations first, then the server element with its HA policies,
    // paths, destinations, clustering and bridge children, then the jms-bridge.
    // The ordering here is the schema contract — do not reorder.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(discoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES)).addChild(pooledConnectionFactory).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, 
ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, 
HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, 
ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, 
SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, 
BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(discoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, 
BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, 
ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, 
LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(pooledConnectionFactory)).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, 
JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111166.791561wildfly
public PersistentResourceXMLDescription getParserDescription() {
    final PersistentResourceXMLBuilder discoveryGroup = builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder pooledConnectionFactory = builder(MessagingExtension.POOLED_CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, 
ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Pooled.USE_JNDI, ConnectionFactoryAttributes.Pooled.JNDI_PARAMS, ConnectionFactoryAttributes.Pooled.REBALANCE_CONNECTIONS, ConnectionFactoryAttributes.Pooled.USE_LOCAL_TX, ConnectionFactoryAttributes.Pooled.SETUP_ATTEMPTS, ConnectionFactoryAttributes.Pooled.SETUP_INTERVAL, ConnectionFactoryAttributes.Pooled.ALLOW_LOCAL_TRANSACTIONS, ConnectionFactoryAttributes.Pooled.TRANSACTION, ConnectionFactoryAttributes.Pooled.USER, ConnectionFactoryAttributes.Pooled.PASSWORD, ConnectionFactoryAttributes.Pooled.CREDENTIAL_REFERENCE, ConnectionFactoryAttributes.Pooled.MIN_POOL_SIZE, ConnectionFactoryAttributes.Pooled.USE_AUTO_RECOVERY, ConnectionFactoryAttributes.Pooled.MAX_POOL_SIZE, ConnectionFactoryAttributes.Pooled.MANAGED_CONNECTION_POOL, ConnectionFactoryAttributes.Pooled.ENLISTMENT_TRACE, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Pooled.INITIAL_CONNECT_ATTEMPTS, ConnectionFactoryAttributes.Pooled.STATISTICS_ENABLED);
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(discoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES)).addChild(pooledConnectionFactory).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, 
ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, 
HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, 
ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, 
SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, 
BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(discoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, 
BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, 
ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, 
LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(pooledConnectionFactory)).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, 
JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1111161.511561wildfly
/**
 * Builds the persistent XML description for this version of the messaging-activemq
 * subsystem schema: the complete tree of elements (connectors, acceptors, server,
 * HA policies, directories, destinations, factories, JMS bridge) and the attributes
 * each element may carry, rooted at {@code MessagingExtension.SUBSYSTEM_PATH} under
 * {@code NAMESPACE}.
 *
 * NOTE(review): the order and exact membership of every addAttributes(...) and
 * addChild(...) call below is part of the XML schema contract for this namespace
 * version — do not reorder, deduplicate, or "clean up" these lists without
 * validating against the corresponding XSD and the subsystem parsing tests.
 *
 * @return the XML description used to parse and persist this subsystem's configuration
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Transport/connector builders declared once up front because they are attached
    // at two levels of the tree: directly under the subsystem root (client-side use)
    // and again under the server element (see the return expression below).
    final PersistentResourceXMLBuilder discoveryGroup = builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Pooled connection factory: shares the Common.* attribute set with the regular
    // connection factory and adds the Pooled.* (resource-adapter/pool) attributes.
    // Also reused at two levels — under the subsystem root and under the server.
    final PersistentResourceXMLBuilder pooledConnectionFactory = builder(MessagingExtension.POOLED_CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, 
ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Pooled.USE_JNDI, ConnectionFactoryAttributes.Pooled.JNDI_PARAMS, ConnectionFactoryAttributes.Pooled.REBALANCE_CONNECTIONS, ConnectionFactoryAttributes.Pooled.USE_LOCAL_TX, ConnectionFactoryAttributes.Pooled.SETUP_ATTEMPTS, ConnectionFactoryAttributes.Pooled.SETUP_INTERVAL, ConnectionFactoryAttributes.Pooled.ALLOW_LOCAL_TRANSACTIONS, ConnectionFactoryAttributes.Pooled.TRANSACTION, ConnectionFactoryAttributes.Pooled.USER, ConnectionFactoryAttributes.Pooled.PASSWORD, ConnectionFactoryAttributes.Pooled.CREDENTIAL_REFERENCE, ConnectionFactoryAttributes.Pooled.MIN_POOL_SIZE, ConnectionFactoryAttributes.Pooled.USE_AUTO_RECOVERY, ConnectionFactoryAttributes.Pooled.MAX_POOL_SIZE, ConnectionFactoryAttributes.Pooled.MANAGED_CONNECTION_POOL, ConnectionFactoryAttributes.Pooled.ENLISTMENT_TRACE, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Pooled.INITIAL_CONNECT_ATTEMPTS, ConnectionFactoryAttributes.Pooled.STATISTICS_ENABLED);
    // Single expression assembling the whole subsystem tree: root attributes, then
    // client-side resources (connectors, discovery group, factories, external
    // destinations), then the full server element with its HA-policy, directory,
    // destination, clustering, bridge, and factory children, then the JMS bridge.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(discoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES)).addChild(pooledConnectionFactory).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, 
ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, 
HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, 
ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, 
SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, 
BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(discoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, 
BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, 
ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, 
LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(pooledConnectionFactory)).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, 
JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
124489.2179228cassandra
/**
 * Orders two {@code StatsTable}s by the configured {@code sortKey}, honoring the
 * configured direction ({@code ascending}). Ties on the sort key fall back to an
 * always-ascending comparison of the tables' full names so the ordering is total
 * and stable across runs.
 *
 * NOTE(review): branches delegating to compareDoubles/compareFileSizes do not apply
 * {@code sign} here, so those helpers presumably apply the direction themselves —
 * confirm against their implementations.
 *
 * @param stx first table to compare
 * @param sty second table to compare
 * @return a negative, zero, or positive int per the {@link java.util.Comparator} contract
 * @throws NullPointerException  if either table is null
 * @throws IllegalStateException if {@code sortKey} is not a supported key
 */
public int compare(StatsTable stx, StatsTable sty) {
    if (stx == null || sty == null)
        throw new NullPointerException("StatsTableComparator cannot compare null objects");
    int sign = ascending ? 1 : -1;
    int result = 0;
    switch (sortKey) {
        case "average_live_cells_per_slice_last_five_minutes":
            result = compareDoubles(stx.averageLiveCellsPerSliceLastFiveMinutes, sty.averageLiveCellsPerSliceLastFiveMinutes);
            break;
        case "average_tombstones_per_slice_last_five_minutes":
            result = compareDoubles(stx.averageTombstonesPerSliceLastFiveMinutes, sty.averageTombstonesPerSliceLastFiveMinutes);
            break;
        case "bloom_filter_false_positives":
            result = sign * Long.compare((Long) stx.bloomFilterFalsePositives, (Long) sty.bloomFilterFalsePositives);
            break;
        case "bloom_filter_false_ratio":
            result = compareDoubles(stx.bloomFilterFalseRatio, sty.bloomFilterFalseRatio);
            break;
        case "bloom_filter_off_heap_memory_used":
            result = compareOffHeapMetric(stx.bloomFilterOffHeapUsed, sty.bloomFilterOffHeapUsed,
                                          stx.bloomFilterOffHeapMemoryUsed, sty.bloomFilterOffHeapMemoryUsed, sign);
            break;
        case "bloom_filter_space_used":
            result = compareFileSizes(stx.bloomFilterSpaceUsed, sty.bloomFilterSpaceUsed);
            break;
        case "compacted_partition_maximum_bytes":
            result = sign * Long.compare(stx.compactedPartitionMaximumBytes, sty.compactedPartitionMaximumBytes);
            break;
        case "compacted_partition_mean_bytes":
            result = sign * Long.compare(stx.compactedPartitionMeanBytes, sty.compactedPartitionMeanBytes);
            break;
        case "compacted_partition_minimum_bytes":
            result = sign * Long.compare(stx.compactedPartitionMinimumBytes, sty.compactedPartitionMinimumBytes);
            break;
        case "compression_metadata_off_heap_memory_used":
            result = compareOffHeapMetric(stx.compressionMetadataOffHeapUsed, sty.compressionMetadataOffHeapUsed,
                                          stx.compressionMetadataOffHeapMemoryUsed, sty.compressionMetadataOffHeapMemoryUsed, sign);
            break;
        case "full_name":
            // The sort key IS the tie-breaker field, so no fallback is needed.
            return sign * stx.fullName.compareTo(sty.fullName);
        case "index_summary_off_heap_memory_used":
            result = compareOffHeapMetric(stx.indexSummaryOffHeapUsed, sty.indexSummaryOffHeapUsed,
                                          stx.indexSummaryOffHeapMemoryUsed, sty.indexSummaryOffHeapMemoryUsed, sign);
            break;
        case "local_read_count":
        case "reads":
            result = sign * Long.compare(stx.localReadCount, sty.localReadCount);
            break;
        case "local_read_latency_ms":
        case "read_latency":
            result = compareDoubles(stx.localReadLatencyMs, sty.localReadLatencyMs);
            break;
        case "local_write_count":
        case "writes":
            result = sign * Long.compare(stx.localWriteCount, sty.localWriteCount);
            break;
        case "local_write_latency_ms":
        case "write_latency":
            result = compareDoubles(stx.localWriteLatencyMs, sty.localWriteLatencyMs);
            break;
        case "local_read_write_ratio":
            result = compareDoubles(stx.localReadWriteRatio, sty.localReadWriteRatio);
            break;
        case "maximum_live_cells_per_slice_last_five_minutes":
            result = sign * Long.compare(stx.maximumLiveCellsPerSliceLastFiveMinutes, sty.maximumLiveCellsPerSliceLastFiveMinutes);
            break;
        case "maximum_tombstones_per_slice_last_five_minutes":
            result = sign * Long.compare(stx.maximumTombstonesPerSliceLastFiveMinutes, sty.maximumTombstonesPerSliceLastFiveMinutes);
            break;
        case "memtable_cell_count":
            result = sign * Long.compare((Long) stx.memtableCellCount, (Long) sty.memtableCellCount);
            break;
        case "memtable_data_size":
            result = compareFileSizes(stx.memtableDataSize, sty.memtableDataSize);
            break;
        case "memtable_off_heap_memory_used":
            result = compareOffHeapMetric(stx.memtableOffHeapUsed, sty.memtableOffHeapUsed,
                                          stx.memtableOffHeapMemoryUsed, sty.memtableOffHeapMemoryUsed, sign);
            break;
        case "memtable_switch_count":
            result = sign * Long.compare((Long) stx.memtableSwitchCount, (Long) sty.memtableSwitchCount);
            break;
        case "number_of_partitions_estimate":
            result = sign * Long.compare((Long) stx.numberOfPartitionsEstimate, (Long) sty.numberOfPartitionsEstimate);
            break;
        case "off_heap_memory_used_total":
            result = compareOffHeapMetric(stx.offHeapUsed, sty.offHeapUsed,
                                          stx.offHeapMemoryUsedTotal, sty.offHeapMemoryUsedTotal, sign);
            break;
        case "pending_flushes":
            result = sign * Long.compare((Long) stx.pendingFlushes, (Long) sty.pendingFlushes);
            break;
        case "percent_repaired":
            result = compareDoubles(stx.percentRepaired, sty.percentRepaired);
            break;
        case "max_sstable_size":
            result = sign * stx.maxSSTableSize.compareTo(sty.maxSSTableSize);
            break;
        case "twcs_max_duration":
            // A null duration means the table does not use TWCS: non-null sorts after
            // null (ascending), and two nulls are a hard tie — no name tie-break, to
            // match the original early "return 0" behavior.
            if (stx.twcsDurationInMillis != null && sty.twcsDurationInMillis == null)
                return sign;
            else if (stx.twcsDurationInMillis == null && sty.twcsDurationInMillis != null)
                return -sign;
            else if (stx.twcsDurationInMillis == null)
                return 0;
            result = sign * stx.twcsDurationInMillis.compareTo(sty.twcsDurationInMillis);
            break;
        case "space_used_by_snapshots_total":
            result = compareFileSizes(stx.spaceUsedBySnapshotsTotal, sty.spaceUsedBySnapshotsTotal);
            break;
        case "space_used_live":
            result = compareFileSizes(stx.spaceUsedLive, sty.spaceUsedLive);
            break;
        case "space_used_total":
            result = compareFileSizes(stx.spaceUsedTotal, sty.spaceUsedTotal);
            break;
        case "sstable_compression_ratio":
            result = compareDoubles(stx.sstableCompressionRatio, sty.sstableCompressionRatio);
            break;
        case "sstable_count":
            result = sign * Integer.compare((Integer) stx.sstableCount, (Integer) sty.sstableCount);
            break;
        case "table_name":
            return sign * stx.tableName.compareTo(sty.tableName);
        default:
            throw new IllegalStateException(String.format("Unsupported sort key: %s", sortKey));
    }
    // Break ties on the sort key by full name (always ascending) to keep the order total.
    return (result == 0) ? stx.fullName.compareTo(sty.fullName) : result;
}

/**
 * Compares an off-heap metric that may be disabled (the "used" flag is false) on
 * either table. A table reporting the metric sorts after one that does not (for
 * ascending order); when neither reports it, the tables tie on this key and the
 * caller's full-name tie-break decides.
 *
 * NOTE(review): assumes the size fields are the same human-readable strings that
 * compareFileSizes consumes elsewhere in this comparator — confirm against StatsTable.
 */
private int compareOffHeapMetric(boolean usedX, boolean usedY, String sizeX, String sizeY, int sign) {
    if (usedX != usedY)
        return usedX ? sign : -sign;
    if (!usedX)
        return 0;
    return compareFileSizes(sizeX, sizeY);
}
1217384.893408elasticsearch
/**
 * Verifies sorting on a doubly-nested field (book -> chapters -> paragraphs) using
 * {@code chapters.paragraphs.word_count}, in both sort orders, with and without nested
 * filters at each level, and with a document that has no nested children at all.
 *
 * NOTE(review): the mapping declares a paragraph field named "content" while the
 * indexed documents use "chapters.paragraphs.text"; the sort only reads "word_count",
 * so the test is unaffected — confirm whether the mismatch is intentional.
 */
public void testMultiLevelNestedSorting() throws IOException {
    IndexService indexService = createIndex("nested_sorting", Settings.EMPTY, buildBookMapping());
    List<List<Document>> books = new ArrayList<>();
    // Book 1 (science fiction): paragraph word counts 743, 234, 478, 849.
    List<Document> book1 = new ArrayList<>();
    book1.add(paragraphDoc("Paragraph 1", 743));
    book1.add(chapterDoc("chapter 3", 400));
    book1.add(paragraphDoc("Paragraph 1", 234));
    book1.add(chapterDoc("chapter 2", 200));
    book1.add(paragraphDoc("Paragraph 2", 478));
    book1.add(paragraphDoc("Paragraph 1", 849));
    book1.add(chapterDoc("chapter 1", 1400));
    book1.add(bookRootDoc("science fiction", "1"));
    books.add(book1);
    // Book 2 (romance): one short chapter (20s) with one 76-word paragraph.
    List<Document> book2 = new ArrayList<>();
    book2.add(paragraphDoc("Introduction", 76));
    book2.add(chapterDoc("chapter 1", 20));
    book2.add(bookRootDoc("romance", "2"));
    books.add(book2);
    // Book 3 (horror): one long chapter (1200s) with one 976-word paragraph.
    List<Document> book3 = new ArrayList<>();
    book3.add(paragraphDoc("A bad dream", 976));
    book3.add(chapterDoc("The beginning of the end", 1200));
    book3.add(bookRootDoc("horror", "3"));
    books.add(book3);
    // Book 4 (cooking): two chapters; the short "introduction" chapter (10s) holds
    // the 87-word paragraph, the rest belong to "easy meals" (800s).
    List<Document> book4 = new ArrayList<>();
    book4.add(paragraphDoc("macaroni", 180));
    book4.add(paragraphDoc("hamburger", 150));
    book4.add(paragraphDoc("tosti", 120));
    book4.add(chapterDoc("easy meals", 800));
    book4.add(paragraphDoc("introduction", 87));
    book4.add(chapterDoc("introduction", 10));
    book4.add(bookRootDoc("cooking", "4"));
    books.add(book4);
    // Book 5 (unknown): root document only — no chapters, no paragraphs.
    List<Document> book5 = new ArrayList<>();
    book5.add(bookRootDoc("unknown", "5"));
    books.add(book5);
    // Index the books in random order, with random intermediate commits, so the
    // assertions cannot accidentally depend on insertion or segment order.
    Collections.shuffle(books, random());
    for (List<Document> book : books) {
        writer.addDocuments(book);
        if (randomBoolean()) {
            writer.commit();
        }
    }
    DirectoryReader reader = DirectoryReader.open(writer);
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0));
    IndexSearcher searcher = newSearcher(reader, false);
    SearchExecutionContext searchExecutionContext = indexService.newSearchExecutionContext(0, 0, searcher, () -> 0L, null, emptyMap());
    FieldSortBuilder sortBuilder = new FieldSortBuilder("chapters.paragraphs.word_count");
    sortBuilder.setNestedSort(new NestedSortBuilder("chapters").setNestedSort(new NestedSortBuilder("chapters.paragraphs")));
    QueryBuilder queryBuilder = new MatchAllQueryBuilder();
    // Ascending, no filter: each book sorts by its smallest paragraph word count;
    // book 5 has no paragraphs, so it gets Long.MAX_VALUE and sorts last.
    TopFieldDocs topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
    assertThat(topFields.totalHits.value, equalTo(5L));
    assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2"));
    assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L));
    assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("4"));
    assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(87L));
    assertThat(searcher.doc(topFields.scoreDocs[2].doc).get("_id"), equalTo("1"));
    assertThat(((FieldDoc) topFields.scoreDocs[2]).fields[0], equalTo(234L));
    assertThat(searcher.doc(topFields.scoreDocs[3].doc).get("_id"), equalTo("3"));
    assertThat(((FieldDoc) topFields.scoreDocs[3]).fields[0], equalTo(976L));
    assertThat(searcher.doc(topFields.scoreDocs[4].doc).get("_id"), equalTo("5"));
    assertThat(((FieldDoc) topFields.scoreDocs[4]).fields[0], equalTo(Long.MAX_VALUE));
    {
        // Ascending, per-genre term queries: sort value is the book's minimum word count.
        queryBuilder = new TermQueryBuilder("genre", "romance");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L));
        queryBuilder = new TermQueryBuilder("genre", "science fiction");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(234L));
        queryBuilder = new TermQueryBuilder("genre", "horror");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L));
        queryBuilder = new TermQueryBuilder("genre", "cooking");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
    }
    {
        // Descending, no filter: each book now sorts by its maximum word count;
        // book 5 gets Long.MIN_VALUE and again sorts last.
        sortBuilder.order(SortOrder.DESC);
        queryBuilder = new MatchAllQueryBuilder();
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(5L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L));
        assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("1"));
        assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(849L));
        assertThat(searcher.doc(topFields.scoreDocs[2].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[2]).fields[0], equalTo(180L));
        assertThat(searcher.doc(topFields.scoreDocs[3].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[3]).fields[0], equalTo(76L));
        assertThat(searcher.doc(topFields.scoreDocs[4].doc).get("_id"), equalTo("5"));
        assertThat(((FieldDoc) topFields.scoreDocs[4]).fields[0], equalTo(Long.MIN_VALUE));
    }
    {
        // Descending, per-genre term queries.
        queryBuilder = new TermQueryBuilder("genre", "romance");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L));
        queryBuilder = new TermQueryBuilder("genre", "science fiction");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(849L));
        queryBuilder = new TermQueryBuilder("genre", "horror");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L));
        queryBuilder = new TermQueryBuilder("genre", "cooking");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(180L));
    }
    {
        // Filter at the chapter level (read time <= 50s): only books 2 and 4 have a
        // matching chapter, and only that chapter's paragraphs contribute sort values.
        queryBuilder = new RangeQueryBuilder("chapters.read_time_seconds").to(50L);
        sortBuilder = new FieldSortBuilder("chapters.paragraphs.word_count");
        sortBuilder.setNestedSort(new NestedSortBuilder("chapters").setFilter(queryBuilder).setNestedSort(new NestedSortBuilder("chapters.paragraphs")));
        topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(2L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L));
        assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(87L));
        sortBuilder.order(SortOrder.DESC);
        topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(2L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
        assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(76L));
    }
    {
        // Filters at both levels: chapter read time <= 50s AND paragraph word count
        // >= 80. Book 2's only paragraph (76 words) is filtered out, so it gets the
        // missing-value sentinel (MAX ascending, MIN descending).
        queryBuilder = new RangeQueryBuilder("chapters.read_time_seconds").to(50L);
        sortBuilder = new FieldSortBuilder("chapters.paragraphs.word_count");
        sortBuilder.setNestedSort(new NestedSortBuilder("chapters").setFilter(queryBuilder).setNestedSort(new NestedSortBuilder("chapters.paragraphs").setFilter(new RangeQueryBuilder("chapters.paragraphs.word_count").from(80L))));
        topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(2L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
        assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(Long.MAX_VALUE));
        sortBuilder.order(SortOrder.DESC);
        topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(2L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
        assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(Long.MIN_VALUE));
    }
    {
        // Chapter-level sort filter combined with genre queries: books whose chapters
        // all exceed 50s (1, 3) have no qualifying paragraphs and sort on MAX_VALUE.
        sortBuilder = new FieldSortBuilder("chapters.paragraphs.word_count");
        sortBuilder.setNestedSort(new NestedSortBuilder("chapters").setFilter(new RangeQueryBuilder("chapters.read_time_seconds").to(50L)).setNestedSort(new NestedSortBuilder("chapters.paragraphs")));
        queryBuilder = new TermQueryBuilder("genre", "romance");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L));
        queryBuilder = new TermQueryBuilder("genre", "science fiction");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(Long.MAX_VALUE));
        queryBuilder = new TermQueryBuilder("genre", "horror");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(Long.MAX_VALUE));
        queryBuilder = new TermQueryBuilder("genre", "cooking");
        topFields = search(queryBuilder, sortBuilder, searchExecutionContext, searcher);
        assertThat(topFields.totalHits.value, equalTo(1L));
        assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
        assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
    }
    searcher.getIndexReader().close();
}

/** Builds the test mapping: title/genre fields plus nested chapters containing nested paragraphs. */
private static XContentBuilder buildBookMapping() throws IOException {
    XContentBuilder mapping = XContentFactory.jsonBuilder();
    mapping.startObject();
    mapping.startObject("_doc");
    mapping.startObject("properties");
    mapping.startObject("title").field("type", "text").endObject();
    mapping.startObject("genre").field("type", "keyword").endObject();
    mapping.startObject("chapters");
    mapping.field("type", "nested");
    mapping.startObject("properties");
    mapping.startObject("title").field("type", "text").endObject();
    mapping.startObject("read_time_seconds").field("type", "integer").endObject();
    mapping.startObject("paragraphs");
    mapping.field("type", "nested");
    mapping.startObject("properties");
    mapping.startObject("header").field("type", "text").endObject();
    mapping.startObject("content").field("type", "text").endObject();
    mapping.startObject("word_count").field("type", "integer").endObject();
    mapping.endObject(); // paragraphs.properties
    mapping.endObject(); // paragraphs
    mapping.endObject(); // chapters.properties
    mapping.endObject(); // chapters
    mapping.endObject(); // properties
    mapping.endObject(); // _doc
    mapping.endObject();
    return mapping;
}

/** Builds one paragraph-level Lucene document (nested path "chapters.paragraphs"). */
private static Document paragraphDoc(String header, int wordCount) {
    Document document = new Document();
    document.add(new TextField("chapters.paragraphs.header", header, Field.Store.NO));
    document.add(new StringField("_nested_path", "chapters.paragraphs", Field.Store.NO));
    document.add(new TextField("chapters.paragraphs.text", "some text...", Field.Store.NO));
    document.add(new SortedNumericDocValuesField("chapters.paragraphs.word_count", wordCount));
    document.add(new IntPoint("chapters.paragraphs.word_count", wordCount));
    return document;
}

/** Builds one chapter-level Lucene document (nested path "chapters"). */
private static Document chapterDoc(String title, int readTimeSeconds) {
    Document document = new Document();
    document.add(new TextField("chapters.title", title, Field.Store.NO));
    document.add(new StringField("_nested_path", "chapters", Field.Store.NO));
    document.add(new IntPoint("chapters.read_time_seconds", readTimeSeconds));
    document.add(new NumericDocValuesField("chapters.read_time_seconds", readTimeSeconds));
    return document;
}

/** Builds the root (book-level) Lucene document carrying the genre and stored _id. */
private static Document bookRootDoc(String genre, String id) {
    Document document = new Document();
    document.add(new StringField("genre", genre, Field.Store.NO));
    document.add(new StringField("_id", id, Field.Store.YES));
    document.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0));
    return document;
}
1215608.082447hadoop
/**
 * Verifies the {@code application -list} sub-command of the YARN CLI against a
 * mocked client. Each scenario stubs {@code client.getApplications(types,
 * states, tags)} with a filtered report list, runs the CLI with the matching
 * flags, and compares the CLI's formatted table output byte-for-byte with an
 * expected rendering built through a PrintWriter. Scenarios cover: default
 * filters, -appTypes (with messy whitespace/comma input), -appStates
 * (including lowercase, ALL, and an invalid value), -appTags, and
 * combinations of all three.
 *
 * Fix: the verification after the appType4/appState4 run previously
 * re-verified the appType2/appState2 interaction (copy-paste error), leaving
 * the appType4/appState4 call unverified.
 */
public void testGetApplications() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    // Six application reports with varied types, states, final statuses and
    // tags; the scenarios below filter different subsets of these.
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationReport newApplicationReport = ApplicationReport.newInstance(applicationId, ApplicationAttemptId.newInstance(applicationId, 1), "user", "queue", "appname", "host", 124, null, YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null, Sets.newHashSet("tag1", "tag3"), false, Priority.UNDEFINED, "", "");
    List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>();
    applicationReports.add(newApplicationReport);
    ApplicationId applicationId2 = ApplicationId.newInstance(1234, 6);
    ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2), "user2", "queue2", "appname2", "host2", 125, null, YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 2, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", null, Sets.newHashSet("tag2", "tag3"), false, Priority.UNDEFINED, "", "");
    applicationReports.add(newApplicationReport2);
    ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7);
    ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3), "user3", "queue3", "appname3", "host3", 126, null, YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, 3, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", null, Sets.newHashSet("tag1", "tag4"), false, Priority.UNDEFINED, "", "");
    applicationReports.add(newApplicationReport3);
    ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8);
    ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4), "user4", "queue4", "appname4", "host4", 127, null, YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, 4, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, "NON-MAPREDUCE", null, Sets.newHashSet("tag1"), false, Priority.UNDEFINED, "", "");
    applicationReports.add(newApplicationReport4);
    ApplicationId applicationId5 = ApplicationId.newInstance(1234, 9);
    ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(applicationId5, ApplicationAttemptId.newInstance(applicationId5, 5), "user5", "queue5", "appname5", "host5", 128, null, YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5, 5, FinalApplicationStatus.KILLED, null, "N/A", 0.93789f, "HIVE", null, Sets.newHashSet("tag2", "tag4"), false, Priority.UNDEFINED, "", "");
    applicationReports.add(newApplicationReport5);
    ApplicationId applicationId6 = ApplicationId.newInstance(1234, 10);
    ApplicationReport newApplicationReport6 = ApplicationReport.newInstance(applicationId6, ApplicationAttemptId.newInstance(applicationId6, 6), "user6", "queue6", "appname6", "host6", 129, null, YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6, 6, FinalApplicationStatus.KILLED, null, "N/A", 0.99789f, "PIG", null, new HashSet<String>(), false, Priority.UNDEFINED, "", "");
    applicationReports.add(newApplicationReport6);
    // Scenario 1: plain "-list" — defaults to the non-terminal states
    // (RUNNING, ACCEPTED, SUBMITTED) with no type/tag filter; expects 4 apps.
    Set<String> appType1 = new HashSet<String>();
    EnumSet<YarnApplicationState> appState1 = EnumSet.noneOf(YarnApplicationState.class);
    appState1.add(YarnApplicationState.RUNNING);
    appState1.add(YarnApplicationState.ACCEPTED);
    appState1.add(YarnApplicationState.SUBMITTED);
    Set<String> appTag = new HashSet<String>();
    when(client.getApplications(appType1, appState1, appTag)).thenReturn(getApplicationReports(applicationReports, appType1, appState1, appTag, false));
    int result = cli.run(new String[] { "application", "-list" });
    assertEquals(0, result);
    verify(client).getApplications(appType1, appState1, appTag);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType1 + ", states: " + appState1 + " and tags: " + appTag + ")" + ":" + 4);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0005\t             ");
    pw.print("appname\t                YARN\t      user\t     ");
    pw.print("queue\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         53.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0007\t            ");
    pw.print("appname3\t           MAPREDUCE\t     user3\t    ");
    pw.print("queue3\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         73.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0009\t            ");
    pw.print("appname5\t                HIVE\t     user5\t    ");
    pw.print("queue5\t          ACCEPTED\t            ");
    pw.print("KILLED\t         93.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0010\t            ");
    pw.print("appname6\t                 PIG\t     user6\t    ");
    pw.print("queue6\t         SUBMITTED\t            ");
    pw.print("KILLED\t         99.79%");
    pw.println("\t                                N/A");
    pw.close();
    String appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 2: -appTypes with noisy input ("YARN, ,,  NON-YARN") — empty
    // tokens and surrounding whitespace must be stripped; expects 1 app.
    Set<String> appType2 = new HashSet<String>();
    appType2.add("YARN");
    appType2.add("NON-YARN");
    EnumSet<YarnApplicationState> appState2 = EnumSet.noneOf(YarnApplicationState.class);
    appState2.add(YarnApplicationState.RUNNING);
    appState2.add(YarnApplicationState.ACCEPTED);
    appState2.add(YarnApplicationState.SUBMITTED);
    when(client.getApplications(appType2, appState2, appTag)).thenReturn(getApplicationReports(applicationReports, appType2, appState2, appTag, false));
    result = cli.run(new String[] { "application", "-list", "-appTypes", "YARN, ,,  NON-YARN", "   ,, ,," });
    assertEquals(0, result);
    verify(client).getApplications(appType2, appState2, appTag);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType2 + ", states: " + appState2 + " and tags: " + appTag + ")" + ":" + 1);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0005\t             ");
    pw.print("appname\t                YARN\t      user\t     ");
    pw.print("queue\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         53.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 3: --appStates with noisy, duplicated input; expects the 2
    // apps in FINISHED or FAILED state.
    Set<String> appType3 = new HashSet<String>();
    EnumSet<YarnApplicationState> appState3 = EnumSet.noneOf(YarnApplicationState.class);
    appState3.add(YarnApplicationState.FINISHED);
    appState3.add(YarnApplicationState.FAILED);
    when(client.getApplications(appType3, appState3, appTag)).thenReturn(getApplicationReports(applicationReports, appType3, appState3, appTag, false));
    result = cli.run(new String[] { "application", "-list", "--appStates", "FINISHED ,, , FAILED", ",,FINISHED" });
    assertEquals(0, result);
    verify(client).getApplications(appType3, appState3, appTag);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType3 + ", states: " + appState3 + " and tags: " + appTag + ")" + ":" + 2);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0006\t            ");
    pw.print("appname2\t            NON-YARN\t     user2\t    ");
    pw.print("queue2\t          FINISHED\t         ");
    pw.print("SUCCEEDED\t         63.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0008\t            ");
    pw.print("appname4\t       NON-MAPREDUCE\t     user4\t    ");
    pw.print("queue4\t            FAILED\t         ");
    pw.print("SUCCEEDED\t         83.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 4: --appTypes and --appStates combined; only the NON-YARN
    // FINISHED app matches.
    Set<String> appType4 = new HashSet<String>();
    appType4.add("YARN");
    appType4.add("NON-YARN");
    EnumSet<YarnApplicationState> appState4 = EnumSet.noneOf(YarnApplicationState.class);
    appState4.add(YarnApplicationState.FINISHED);
    appState4.add(YarnApplicationState.FAILED);
    when(client.getApplications(appType4, appState4, appTag)).thenReturn(getApplicationReports(applicationReports, appType4, appState4, appTag, false));
    result = cli.run(new String[] { "application", "-list", "--appTypes", "YARN,NON-YARN", "--appStates", "FINISHED ,, , FAILED" });
    assertEquals(0, result);
    // Fixed: verify the invocation just made (appType4/appState4); this
    // previously re-verified appType2/appState2 by copy-paste.
    verify(client).getApplications(appType4, appState4, appTag);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType4 + ", states: " + appState4 + " and tags: " + appTag + ")" + ":" + 1);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0006\t            ");
    pw.print("appname2\t            NON-YARN\t     user2\t    ");
    pw.print("queue2\t          FINISHED\t         ");
    pw.print("SUCCEEDED\t         63.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 5: an invalid state name must fail with exit code -1 and a
    // usage message listing all valid states (prefixed by ALL).
    result = cli.run(new String[] { "application", "-list", "--appStates", "FINISHED ,, , INVALID" });
    assertEquals(-1, result);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("The application state  INVALID is invalid.");
    pw.print("The valid application state can be one of the following: ");
    StringBuilder sb = new StringBuilder();
    sb.append("ALL,");
    for (YarnApplicationState state : YarnApplicationState.values()) {
        sb.append(state + ",");
    }
    String output = sb.toString();
    pw.println(output.substring(0, output.length() - 1));
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    // Error path writes the message without going through sysOut.write, so
    // the write count is unchanged from the previous scenario.
    verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 6: "ALL" in --appStates means no state filtering — all 6 apps.
    Set<String> appType5 = new HashSet<String>();
    EnumSet<YarnApplicationState> appState5 = EnumSet.noneOf(YarnApplicationState.class);
    appState5.add(YarnApplicationState.FINISHED);
    when(client.getApplications(appType5, appState5, appTag)).thenReturn(getApplicationReports(applicationReports, appType5, appState5, appTag, true));
    result = cli.run(new String[] { "application", "-list", "--appStates", "FINISHED ,, , ALL" });
    assertEquals(0, result);
    verify(client).getApplications(appType5, appState5, appTag);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType5 + ", states: " + appState5 + " and tags: " + appTag + ")" + ":" + 6);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0005\t             ");
    pw.print("appname\t                YARN\t      user\t     ");
    pw.print("queue\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         53.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0006\t            ");
    pw.print("appname2\t            NON-YARN\t     user2\t    ");
    pw.print("queue2\t          FINISHED\t         ");
    pw.print("SUCCEEDED\t         63.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0007\t            ");
    pw.print("appname3\t           MAPREDUCE\t     user3\t    ");
    pw.print("queue3\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         73.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0008\t            ");
    pw.print("appname4\t       NON-MAPREDUCE\t     user4\t    ");
    pw.print("queue4\t            FAILED\t         ");
    pw.print("SUCCEEDED\t         83.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0009\t            ");
    pw.print("appname5\t                HIVE\t     user5\t    ");
    pw.print("queue5\t          ACCEPTED\t            ");
    pw.print("KILLED\t         93.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0010\t            ");
    pw.print("appname6\t                 PIG\t     user6\t    ");
    pw.print("queue6\t         SUBMITTED\t            ");
    pw.print("KILLED\t         99.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 7: lowercase state name ("finished") must be accepted.
    Set<String> appType6 = new HashSet<String>();
    appType6.add("YARN");
    appType6.add("NON-YARN");
    EnumSet<YarnApplicationState> appState6 = EnumSet.noneOf(YarnApplicationState.class);
    appState6.add(YarnApplicationState.FINISHED);
    when(client.getApplications(appType6, appState6, appTag)).thenReturn(getApplicationReports(applicationReports, appType6, appState6, appTag, false));
    result = cli.run(new String[] { "application", "-list", "-appTypes", "YARN, ,,  NON-YARN", "--appStates", "finished" });
    assertEquals(0, result);
    verify(client).getApplications(appType6, appState6, appTag);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType6 + ", states: " + appState6 + " and tags: " + appTag + ")" + ":" + 1);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0006\t            ");
    pw.print("appname2\t            NON-YARN\t     user2\t    ");
    pw.print("queue2\t          FINISHED\t         ");
    pw.print("SUCCEEDED\t         63.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 8: filter by -appTags only; "tag1" matches apps 5 and 7
    // (default non-terminal state filter still applies).
    Set<String> appTag1 = Sets.newHashSet("tag1");
    when(client.getApplications(appType1, appState1, appTag1)).thenReturn(getApplicationReports(applicationReports, appType1, appState1, appTag1, false));
    result = cli.run(new String[] { "application", "-list", "-appTags", "tag1" });
    assertEquals(0, result);
    verify(client).getApplications(appType1, appState1, appTag1);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType1 + ", states: " + appState1 + " and tags: " + appTag1 + ")" + ":" + 2);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0005\t             ");
    pw.print("appname\t                YARN\t      user\t     ");
    pw.print("queue\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         53.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0007\t            ");
    pw.print("appname3\t           MAPREDUCE\t     user3\t    ");
    pw.print("queue3\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         73.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(7)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 9: -appStates plus -appTags combined.
    EnumSet<YarnApplicationState> appState7 = EnumSet.of(YarnApplicationState.RUNNING, YarnApplicationState.FAILED);
    when(client.getApplications(appType1, appState7, appTag1)).thenReturn(getApplicationReports(applicationReports, appType1, appState7, appTag1, false));
    result = cli.run(new String[] { "application", "-list", "-appStates", "RUNNING,FAILED", "-appTags", "tag1" });
    assertEquals(0, result);
    verify(client).getApplications(appType1, appState7, appTag1);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType1 + ", states: " + appState7 + " and tags: " + appTag1 + ")" + ":" + 3);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0005\t             ");
    pw.print("appname\t                YARN\t      user\t     ");
    pw.print("queue\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         53.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0007\t            ");
    pw.print("appname3\t           MAPREDUCE\t     user3\t    ");
    pw.print("queue3\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         73.79%");
    pw.println("\t                                N/A");
    pw.print("         application_1234_0008\t            ");
    pw.print("appname4\t       NON-MAPREDUCE\t     user4\t    ");
    pw.print("queue4\t            FAILED\t         ");
    pw.print("SUCCEEDED\t         83.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(8)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 10: -appTypes plus -appTags combined.
    Set<String> appType9 = Sets.newHashSet("YARN");
    Set<String> appTag2 = Sets.newHashSet("tag3");
    when(client.getApplications(appType9, appState1, appTag2)).thenReturn(getApplicationReports(applicationReports, appType9, appState1, appTag2, false));
    result = cli.run(new String[] { "application", "-list", "-appTypes", "YARN", "-appTags", "tag3" });
    assertEquals(0, result);
    verify(client).getApplications(appType9, appState1, appTag2);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType9 + ", states: " + appState1 + " and tags: " + appTag2 + ")" + ":" + 1);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0005\t             ");
    pw.print("appname\t                YARN\t      user\t     ");
    pw.print("queue\t           RUNNING\t         ");
    pw.print("SUCCEEDED\t         53.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(9)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 11: all three filters (-appTypes, -appStates, -appTags).
    Set<String> appType10 = Sets.newHashSet("HIVE");
    Set<String> appTag3 = Sets.newHashSet("tag4");
    EnumSet<YarnApplicationState> appState10 = EnumSet.of(YarnApplicationState.ACCEPTED);
    when(client.getApplications(appType10, appState10, appTag3)).thenReturn(getApplicationReports(applicationReports, appType10, appState10, appTag3, false));
    result = cli.run(new String[] { "application", "-list", "-appTypes", "HIVE", "-appStates", "ACCEPTED", "-appTags", "tag4" });
    assertEquals(0, result);
    verify(client).getApplications(appType10, appState10, appTag3);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType10 + ", states: " + appState10 + " and tags: " + appTag3 + ")" + ":" + 1);
    pw.print("                Application-Id\t    Application-Name");
    pw.print("\t    Application-Type");
    pw.print("\t      User\t     Queue\t             State\t       ");
    pw.print("Final-State\t       Progress");
    pw.println("\t                       Tracking-URL");
    pw.print("         application_1234_0009\t            ");
    pw.print("appname5\t                HIVE\t     user5\t    ");
    pw.print("queue5\t          ACCEPTED\t            ");
    pw.print("KILLED\t         93.79%");
    pw.println("\t                                N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(10)).write(any(byte[].class), anyInt(), anyInt());
}
126720.058262hadoop
/**
 * Builds a {@code docker run} command for the given container from its launch
 * context environment and execution attributes, then executes it through the
 * privileged operation executor.
 *
 * The container environment drives most behavior: image, network, hostname,
 * runtime, service mode, port mappings, user mounts, tmpfs mounts, yarn-sysfs
 * and client config are all read from ENV_DOCKER_CONTAINER_* variables, with
 * configured defaults applied when absent.
 *
 * @param ctx container runtime context carrying the container, work dir,
 *            run-as user, localized resources and resource options
 * @throws ContainerExecutionException on any validation failure (network,
 *         hostname, image, runtime, mounts, port mappings, uid/gid below the
 *         remapping thresholds), volume publishing failure, or a failed
 *         privileged launch operation
 */
public void launchContainer(ContainerRuntimeContext ctx) throws ContainerExecutionException {
    Container container = ctx.getContainer();
    ContainerId containerId = container.getContainerId();
    String containerIdStr = containerId.toString();
    // Pull runtime configuration out of the container's launch environment.
    Map<String, String> environment = container.getLaunchContext().getEnvironment();
    String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE);
    String network = environment.get(ENV_DOCKER_CONTAINER_NETWORK);
    String hostname = environment.get(ENV_DOCKER_CONTAINER_HOSTNAME);
    String runtime = environment.get(ENV_DOCKER_CONTAINER_DOCKER_RUNTIME);
    boolean serviceMode = Boolean.parseBoolean(environment.get(ENV_DOCKER_CONTAINER_DOCKER_SERVICE_MODE));
    // Service mode always implies entry-point mode.
    boolean useEntryPoint = serviceMode || checkUseEntryPoint(environment);
    String clientConfig = environment.get(ENV_DOCKER_CONTAINER_CLIENT_CONFIG);
    // Fall back to configured defaults when the env leaves image/network unset.
    if (imageName == null || imageName.isEmpty()) {
        imageName = defaultImageName;
    }
    if (network == null || network.isEmpty()) {
        network = defaultNetwork;
    }
    // Validate everything user-controllable before building the command.
    validateContainerNetworkType(network);
    validateHostname(hostname);
    validateImageName(imageName);
    validateContainerRuntimeType(runtime);
    if (defaultImageUpdate) {
        pullImageFromRemote(containerIdStr, imageName);
    }
    String runAsUser = ctx.getExecutionAttribute(RUN_AS_USER);
    String dockerRunAsUser = runAsUser;
    Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR);
    String[] groups = null;
    // With user remapping, run the container as uid:gid instead of the user
    // name, but only for non-privileged containers; reject uids/gids below
    // the configured thresholds (e.g. system accounts).
    if (enableUserReMapping) {
        String uid = getUserIdInfo(runAsUser);
        groups = getGroupIdInfo(runAsUser);
        String gid = groups[0];
        if (Integer.parseInt(uid) < userRemappingUidThreshold) {
            String message = "uid: " + uid + " below threshold: " + userRemappingUidThreshold;
            throw new ContainerExecutionException(message);
        }
        for (int i = 0; i < groups.length; i++) {
            String group = groups[i];
            if (Integer.parseInt(group) < userRemappingGidThreshold) {
                String message = "gid: " + group + " below threshold: " + userRemappingGidThreshold;
                throw new ContainerExecutionException(message);
            }
        }
        if (!allowPrivilegedContainerExecution(container)) {
            dockerRunAsUser = uid + ":" + gid;
        } else {
            dockerRunAsUser = ctx.getExecutionAttribute(USER);
        }
    }
    @SuppressWarnings("unchecked")
    List<String> filecacheDirs = ctx.getExecutionAttribute(FILECACHE_DIRS);
    @SuppressWarnings("unchecked")
    List<String> containerLogDirs = ctx.getExecutionAttribute(CONTAINER_LOG_DIRS);
    @SuppressWarnings("unchecked")
    List<String> userFilecacheDirs = ctx.getExecutionAttribute(USER_FILECACHE_DIRS);
    @SuppressWarnings("unchecked")
    List<String> applicationLocalDirs = ctx.getExecutionAttribute(APPLICATION_LOCAL_DIRS);
    @SuppressWarnings("unchecked")
    Map<Path, List<String>> localizedResources = ctx.getExecutionAttribute(LOCALIZED_RESOURCES);
    // NOTE(review): this @SuppressWarnings looks like copy-paste from the
    // attribute reads above — the DockerRunCommand construction is not an
    // unchecked operation; confirm and consider removing.
    @SuppressWarnings("unchecked")
    DockerRunCommand runCommand = new DockerRunCommand(containerIdStr, dockerRunAsUser, imageName).setNetworkType(network);
    setHostname(runCommand, containerIdStr, network, hostname);
    // Optional comma-separated host:container port mappings; each entry must
    // match PORTS_MAPPING_PATTERN.
    if (environment.containsKey(ENV_DOCKER_CONTAINER_PORTS_MAPPING)) {
        String portsMapping = environment.get(ENV_DOCKER_CONTAINER_PORTS_MAPPING);
        for (String mapping : portsMapping.split(",")) {
            if (!Pattern.matches(PORTS_MAPPING_PATTERN, mapping)) {
                throw new ContainerExecutionException("Invalid port mappings: " + mapping);
            }
            runCommand.addPortsMapping(mapping);
        }
    }
    runCommand.setCapabilities(capabilities);
    if (runtime != null && !runtime.isEmpty()) {
        runCommand.addRuntime(runtime);
    }
    // Standard NM-managed dirs are only mounted for non-service-mode
    // containers.
    if (!serviceMode) {
        runCommand.addAllReadWriteMountLocations(containerLogDirs);
        runCommand.addAllReadWriteMountLocations(applicationLocalDirs);
        runCommand.addAllReadOnlyMountLocations(filecacheDirs);
        runCommand.addAllReadOnlyMountLocations(userFilecacheDirs);
    }
    // User-supplied mounts: src:dst[:mode]. Relative sources are resolved
    // through the localized resources (mounted read-only); a final comma
    // count cross-check catches entries the regex silently skipped.
    if (environment.containsKey(ENV_DOCKER_CONTAINER_MOUNTS)) {
        Matcher parsedMounts = USER_MOUNT_PATTERN.matcher(environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
        if (!parsedMounts.find()) {
            throw new ContainerExecutionException("Unable to parse user supplied mount list: " + environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
        }
        parsedMounts.reset();
        long mountCount = 0;
        while (parsedMounts.find()) {
            mountCount++;
            String src = parsedMounts.group(1);
            java.nio.file.Path srcPath = java.nio.file.Paths.get(src);
            if (!srcPath.isAbsolute()) {
                src = mountReadOnlyPath(src, localizedResources);
            }
            String dst = parsedMounts.group(2);
            String mode = parsedMounts.group(4);
            if (mode == null) {
                // No explicit mode defaults to read-write.
                mode = "rw";
            } else if (!mode.startsWith("ro") && !mode.startsWith("rw")) {
                // A bare option string (e.g. "shared") gets an rw+ prefix.
                mode = "rw+" + mode;
            }
            runCommand.addMountLocation(src, dst, mode);
        }
        long commaCount = environment.get(ENV_DOCKER_CONTAINER_MOUNTS).chars().filter(c -> c == ',').count();
        if (mountCount != commaCount + 1) {
            throw new ContainerExecutionException("Unable to parse some mounts in user supplied mount list: " + environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
        }
    }
    // Administrator-configured default read-only and read-write mounts
    // ("src:dst" pairs).
    if (defaultROMounts != null && !defaultROMounts.isEmpty()) {
        for (String mount : defaultROMounts) {
            String[] dir = StringUtils.split(mount, ':');
            if (dir.length != 2) {
                throw new ContainerExecutionException("Invalid mount : " + mount);
            }
            String src = dir[0];
            String dst = dir[1];
            runCommand.addReadOnlyMountLocation(src, dst);
        }
    }
    if (defaultRWMounts != null && !defaultRWMounts.isEmpty()) {
        for (String mount : defaultRWMounts) {
            String[] dir = StringUtils.split(mount, ':');
            if (dir.length != 2) {
                throw new ContainerExecutionException("Invalid mount : " + mount);
            }
            String src = dir[0];
            String dst = dir[1];
            runCommand.addReadWriteMountLocation(src, dst);
        }
    }
    // CSI volumes requested by the container are published on this node and
    // mounted read-write into the container.
    ContainerVolumePublisher publisher = new ContainerVolumePublisher(container, container.getCsiVolumesRootDir(), this);
    try {
        Map<String, String> volumeMounts = publisher.publishVolumes();
        volumeMounts.forEach((local, remote) -> runCommand.addReadWriteMountLocation(local, remote));
    } catch (YarnException | IOException e) {
        throw new ContainerExecutionException("Container requests for volume resource but we are failed" + " to publish volumes on this node");
    }
    // tmpfs mounts: user-requested (from env) and administrator defaults,
    // both validated against TMPFS_MOUNT_PATTERN.
    if (environment.containsKey(ENV_DOCKER_CONTAINER_TMPFS_MOUNTS)) {
        String[] tmpfsMounts = environment.get(ENV_DOCKER_CONTAINER_TMPFS_MOUNTS).split(",");
        for (String mount : tmpfsMounts) {
            if (!TMPFS_MOUNT_PATTERN.matcher(mount).matches()) {
                throw new ContainerExecutionException("Invalid tmpfs mount : " + mount);
            }
            runCommand.addTmpfsMount(mount);
        }
    }
    if (defaultTmpfsMounts != null && !defaultTmpfsMounts.isEmpty()) {
        for (String mount : defaultTmpfsMounts) {
            if (!TMPFS_MOUNT_PATTERN.matcher(mount).matches()) {
                throw new ContainerExecutionException("Invalid tmpfs mount : " + mount);
            }
            runCommand.addTmpfsMount(mount);
        }
    }
    if (allowHostPidNamespace(container)) {
        runCommand.setPidNamespace("host");
    }
    if (allowPrivilegedContainerExecution(container)) {
        runCommand.setPrivileged();
    }
    addDockerClientConfigToRunCommand(ctx, runCommand, getAdditionalDockerClientCredentials(clientConfig, containerIdStr));
    String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS);
    addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand);
    if (environment.containsKey(ENV_DOCKER_CONTAINER_YARN_SYSFS) && Boolean.parseBoolean(environment.get(ENV_DOCKER_CONTAINER_YARN_SYSFS))) {
        runCommand.setYarnSysFS(true);
    }
    List<String> commands = container.getLaunchContext().getCommands();
    if (serviceMode) {
        // In service mode, strip everything from the first stdout
        // redirection ("1>") onward and re-split into argv tokens.
        commands = Arrays.asList(String.join(" ", commands).split("1>")[0].split(" "));
    }
    if (useEntryPoint) {
        // Entry-point mode: run the image's own entry point with the env and
        // args passed through; the container stays attached and logs to the
        // container log dir.
        runCommand.setOverrideDisabled(true);
        runCommand.addEnv(environment);
        runCommand.setOverrideCommandWithArgs(commands);
        runCommand.disableDetach();
        runCommand.setLogDir(container.getLogDir());
    } else {
        // Default mode: override the command with "bash <launch_script>" in
        // the container work dir and detach after starting.
        List<String> overrideCommands = new ArrayList<>();
        Path launchDst = new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT);
        overrideCommands.add("bash");
        overrideCommands.add(launchDst.toUri().getPath());
        runCommand.setContainerWorkDir(containerWorkDir.toString());
        runCommand.setOverrideCommandWithArgs(overrideCommands);
        runCommand.detachOnRun();
    }
    if (serviceMode) {
        runCommand.setServiceMode(serviceMode);
    }
    // Supplementary groups are only added for non-privileged remapped users,
    // mirroring the uid:gid decision above.
    if (enableUserReMapping) {
        if (!allowPrivilegedContainerExecution(container)) {
            runCommand.groupAdd(groups);
        }
    }
    // Let resource plugins (e.g. device plugins) create volumes and amend the
    // run command before launch.
    if (nmContext != null && nmContext.getResourcePluginManager().getNameToPlugins() != null) {
        for (ResourcePlugin plugin : nmContext.getResourcePluginManager().getNameToPlugins().values()) {
            DockerCommandPlugin dockerCommandPlugin = plugin.getDockerCommandPluginInstance();
            if (dockerCommandPlugin != null) {
                DockerVolumeCommand dockerVolumeCommand = dockerCommandPlugin.getCreateDockerVolumeCommand(ctx.getContainer());
                if (dockerVolumeCommand != null) {
                    runDockerVolumeCommand(dockerVolumeCommand, container);
                    if (dockerVolumeCommand.getSubCommand().equals(DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND)) {
                        checkDockerVolumeCreated(dockerVolumeCommand, container);
                    }
                }
                dockerCommandPlugin.updateDockerRunCommand(runCommand, container);
            }
        }
    }
    // Serialize the run command to a temp file and execute it via the
    // container-executor; failure logging is handled by the catch below.
    String commandFile = dockerClient.writeCommandToTempFile(runCommand, containerId, nmContext);
    PrivilegedOperation launchOp = buildLaunchOp(ctx, commandFile, runCommand);
    launchOp.disableFailureLogging();
    try {
        privilegedOperationExecutor.executePrivilegedOperation(null, launchOp, null, null, false, false);
    } catch (PrivilegedOperationException e) {
        throw new ContainerExecutionException("Launch container failed", e.getExitCode(), e.getOutput(), e.getErrorOutput());
    }
}
1210069.171517wildfly
/**
 * Builds the {@code PersistentResourceXMLDescription} used to parse and persist the
 * messaging-activemq subsystem configuration for the schema version identified by
 * {@code NAMESPACE}.
 *
 * <p>The description is assembled as one declarative builder chain. A handful of
 * element builders (the connector variants and the two discovery-group variants) are
 * created up front in local variables because they are registered twice: once directly
 * under the subsystem root (for external/client use) and once inside each
 * {@code <server>} element — see the repeated {@code addChild(httpConnector)} etc.
 * in the return expression below.
 *
 * <p>NOTE(review): the order of {@code addChild(...)} calls defines the accepted
 * element order of the persisted XML; do not reorder without checking the XSD.
 *
 * @return the XML description for this subsystem version
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Discovery-group variants: JGroups-channel based ...
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    // ... and socket-binding (UDP multicast) based.
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    // Connector variants shared between the subsystem root and each <server>:
    // remote (netty/socket-binding), HTTP-upgrade, in-VM, and fully generic.
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Root description: subsystem attributes, the shared connector/discovery-group
    // children, the external connection-factory/destination resources, then the full
    // <server> definition (HA policies, journal, paths, security, addressing,
    // acceptors, broadcast/discovery groups, cluster connections, bridges, JMS
    // destinations and connection factories), and finally the <jms-bridge> resource.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, 
ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, 
ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, 
HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, 
SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, 
CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, 
GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, 
ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, 
ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, 
LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1210117.911526wildfly
/**
 * Builds the {@code PersistentResourceXMLDescription} used to parse and marshal this
 * messaging-activemq subsystem schema version ({@code NAMESPACE}).
 * <p>
 * The whole description is a single fluent builder expression rooted at
 * {@code MessagingExtension.SUBSYSTEM_PATH}. Children registered under the server resource
 * include the HA policies (live-only, replication master/slave/colocated, shared-store
 * master/slave/colocated), journal/bindings/large-messages/paging directory paths, queues,
 * security and address settings, transports (HTTP/remote/in-vm/generic connectors and
 * acceptors), broadcast/discovery groups, cluster connections, grouping handler, diverts,
 * bridges, connector services, JMS destinations, and the connection-factory variants
 * (regular, legacy, pooled); the jms-bridge resource is registered as a sibling of server.
 * <p>
 * NOTE(review): the attribute enumeration order here presumably dictates the order in which
 * attributes/elements are marshalled to XML — confirm against
 * {@code PersistentResourceXMLDescription} before reordering anything.
 *
 * @return the XML persistence description for this subsystem schema version
 */
public PersistentResourceXMLDescription getParserDescription() {
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, 
// -- server attributes (cont.), then HA policy children: live-only, replication master / slave / colocated
ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, 
HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, 
// -- shared-store colocated slave (cont.), then directory paths, queue, security-setting/role, address-setting
ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, 
AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES)).addChild(builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, CommonAttributes.JGROUPS_CHANNEL, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, CommonAttributes.JGROUPS_CHANNEL, DiscoveryGroupDefinition.REFRESH_TIMEOUT, 
// -- discovery-group (cont.), cluster-connection, grouping-handler, divert, bridge
DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT)).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, 
CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, 
// -- connection-factory attrs (cont.), then legacy-connection-factory
ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Regular.FACTORY_TYPE)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, 
LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(builder(MessagingExtension.POOLED_CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, 
// -- pooled-connection-factory attrs (cont.)
ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Pooled.USE_JNDI, ConnectionFactoryAttributes.Pooled.JNDI_PARAMS, ConnectionFactoryAttributes.Pooled.REBALANCE_CONNECTIONS, ConnectionFactoryAttributes.Pooled.USE_LOCAL_TX, ConnectionFactoryAttributes.Pooled.SETUP_ATTEMPTS, ConnectionFactoryAttributes.Pooled.SETUP_INTERVAL, ConnectionFactoryAttributes.Pooled.ALLOW_LOCAL_TRANSACTIONS, ConnectionFactoryAttributes.Pooled.TRANSACTION, ConnectionFactoryAttributes.Pooled.USER, ConnectionFactoryAttributes.Pooled.PASSWORD, ConnectionFactoryAttributes.Pooled.CREDENTIAL_REFERENCE, ConnectionFactoryAttributes.Pooled.MIN_POOL_SIZE, ConnectionFactoryAttributes.Pooled.USE_AUTO_RECOVERY, ConnectionFactoryAttributes.Pooled.MAX_POOL_SIZE, 
// -- pooled-connection-factory tail, then jms-bridge (sibling of the server resource)
ConnectionFactoryAttributes.Pooled.MANAGED_CONNECTION_POOL, ConnectionFactoryAttributes.Pooled.ENLISTMENT_TRACE, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Pooled.INITIAL_CONNECT_ATTEMPTS, ConnectionFactoryAttributes.Pooled.STATISTICS_ENABLED))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1210181.871529wildfly
/**
 * Returns the {@code PersistentResourceXMLDescription} that drives XML parsing and
 * marshalling for this version of the messaging-activemq subsystem schema.
 *
 * <p>The description is one large builder chain rooted at the subsystem path. It registers
 * the two global client thread-pool attributes, then a {@code server} child covering
 * persistence/journal/security/management attributes, HA policies (live-only, replication
 * master/slave/colocated, shared-store master/slave/colocated), directory paths, core
 * queues, security settings, address settings, transports (HTTP/remote/in-vm/generic
 * connectors and acceptors), broadcast and discovery groups, cluster connections, the
 * grouping handler, diverts, bridges, connector services, JMS destinations, and the
 * regular/legacy/pooled connection factories; a top-level {@code jms-bridge} child closes
 * the chain.
 *
 * <p>NOTE(review): the attribute order listed here presumably fixes the attribute
 * marshalling order in the persisted XML — confirm against the
 * PersistentResourceXMLDescription contract before reordering.
 */
public PersistentResourceXMLDescription getParserDescription() {
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, 
// (cont.) remaining server journal-JDBC/statistics/interceptor attributes, then the HA policy children
ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, 
// (cont.) replication-colocated master/slave configuration, then the shared-store HA policies
HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, 
// (cont.) directory-path children, core queues, security settings (per-role permissions), address settings
ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, 
// (cont.) remaining address-setting attributes, then connectors and acceptors (HTTP/remote/in-vm/generic), broadcast/discovery groups
AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES)).addChild(builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, 
// (cont.) cluster connections, grouping handler, diverts, then core bridges
CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT)).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, 
// (cont.) remaining bridge attributes, connector services, JMS queue/topic destinations, regular connection factory
CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, 
// (cont.) remaining regular connection-factory attributes, then the legacy connection factory
CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, 
// (cont.) remaining legacy connection-factory attributes, then the pooled connection factory
LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(builder(MessagingExtension.POOLED_CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, 
// (cont.) pooled connection-factory common attributes plus the Pooled-specific (JNDI/tx/pool) attributes
ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Pooled.USE_JNDI, ConnectionFactoryAttributes.Pooled.JNDI_PARAMS, ConnectionFactoryAttributes.Pooled.REBALANCE_CONNECTIONS, ConnectionFactoryAttributes.Pooled.USE_LOCAL_TX, ConnectionFactoryAttributes.Pooled.SETUP_ATTEMPTS, ConnectionFactoryAttributes.Pooled.SETUP_INTERVAL, ConnectionFactoryAttributes.Pooled.ALLOW_LOCAL_TRANSACTIONS, ConnectionFactoryAttributes.Pooled.TRANSACTION, ConnectionFactoryAttributes.Pooled.USER, ConnectionFactoryAttributes.Pooled.PASSWORD, 
// (cont.) pooled CF pool/recovery settings close the server child; the top-level jms-bridge child ends the chain
ConnectionFactoryAttributes.Pooled.CREDENTIAL_REFERENCE, ConnectionFactoryAttributes.Pooled.MIN_POOL_SIZE, ConnectionFactoryAttributes.Pooled.USE_AUTO_RECOVERY, ConnectionFactoryAttributes.Pooled.MAX_POOL_SIZE, ConnectionFactoryAttributes.Pooled.MANAGED_CONNECTION_POOL, ConnectionFactoryAttributes.Pooled.ENLISTMENT_TRACE, ConnectionFactoryAttributes.Pooled.INITIAL_CONNECT_ATTEMPTS, ConnectionFactoryAttributes.Pooled.STATISTICS_ENABLED))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
129768.491506wildfly
public PersistentResourceXMLDescription getParserDescription() {
    final PersistentResourceXMLBuilder discoveryGroup = builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(discoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, 
ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, 
ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, 
ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, 
SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, 
CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(discoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, 
CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, 
ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, 
LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, 
JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
129808.181506wildfly
/**
 * Builds the {@link PersistentResourceXMLDescription} used to parse and persist this
 * messaging subsystem schema version (the namespace is supplied by the surrounding
 * class via {@code NAMESPACE}).
 *
 * <p>The description is assembled as one large builder chain. Transport builders
 * (connectors) and the discovery-group builder are created once up front because they
 * are registered twice: once directly under the subsystem root and once again under
 * the {@code server} resource further down the chain.
 *
 * <p>NOTE(review): the ordering of {@code addChild}/{@code addAttributes} calls mirrors
 * the element/attribute order of the subsystem XSD — do not reorder casually.
 *
 * @return the XML description for this subsystem version
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Shared builders, reused both at subsystem level and under the server resource.
    final PersistentResourceXMLBuilder discoveryGroup = builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Root: subsystem attributes, subsystem-level connectors/discovery-group,
    // external connection-factory & pooled-CF, external JMS destinations,
    // then the full "server" resource with its journal/security/clustering attributes.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(discoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, 
    /* server attributes continued: journal storage / JDBC journal / counters / expiry, then HA policy children (live-only, replication, shared-store) */
    ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, 
    /* replication master/slave/colocated HA children */
    ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, 
    /* shared-store colocated child, then server directory paths, core queue, security settings */
    HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, 
    /* address-settings, server-level transports (connectors reused from above) and acceptors */
    SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, 
    /* generic acceptor, broadcast/discovery groups, cluster-connection, grouping-handler, divert */
    CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(discoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, 
    /* core bridge, connector-service, JMS destinations, in-server connection-factory */
    DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, 
    /* connection-factory attributes continued, then legacy-connection-factory */
    ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, 
    /* legacy-connection-factory attributes, in-server pooled-CF (createPooledConnectionFactory(false)), then JMS bridge at subsystem level */
    LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, 
    /* JMS bridge attributes: retry policy, source and target connection configuration */
    JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1210082.461517wildfly
/**
 * Builds the persistent XML description used to parse and marshal this version of the
 * messaging subsystem schema. The description is a single declarative tree: a handful of
 * shared builders (discovery groups and connectors) are created first because they are
 * attached at two scopes — directly under the subsystem (for external client resources)
 * and again under the embedded {@code server} resource — and the remainder of the tree
 * is assembled in one chained {@code addChild}/{@code addAttributes} expression that is
 * finalized with {@code build()}.
 *
 * NOTE(review): the attribute lists below define the exact set of XML attributes/elements
 * accepted for each resource in this schema version; adding or removing an entry changes
 * the parsed schema, so the ordering and membership here is intentional.
 *
 * @return the {@code PersistentResourceXMLDescription} for this subsystem schema version
 */
public PersistentResourceXMLDescription getParserDescription() {
    // Discovery-group variants, declared once and reused at both subsystem and server
    // scope: one backed by a JGroups channel, one by a socket binding.
    final PersistentResourceXMLBuilder jgroupDiscoveryGroup = builder(JGroupsDiscoveryGroupDefinition.PATH).addAttributes(DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, DiscoveryGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    final PersistentResourceXMLBuilder socketDiscoveryGroup = builder(SocketDiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT);
    // Connector variants, likewise shared between the two scopes: remote (socket-binding),
    // HTTP (upgrade over an Undertow listener), in-VM, and the generic factory-class form.
    final PersistentResourceXMLBuilder remoteConnector = builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder httpConnector = builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, HTTPConnectorDefinition.SERVER_NAME, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder invmConnector = builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS);
    final PersistentResourceXMLBuilder connector = builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS);
    // Root subsystem element: global client thread-pool attributes, the shared connector
    // and discovery-group children, external connection factories / JMS destinations, the
    // embedded ActiveMQ server (with its HA policies, journal/paths, security, address
    // settings, acceptors, broadcast/discovery, clustering, diverts, bridges, JMS
    // destinations, connection factories and legacy connection factory), and finally the
    // JMS bridge resource.
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addAttributes(MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_THREAD_POOL_MAX_SIZE, MessagingSubsystemRootResourceDefinition.GLOBAL_CLIENT_SCHEDULED_THREAD_POOL_MAX_SIZE).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(CommonAttributes.HA, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.External.ENABLE_AMQ1_PREFIX, ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(createPooledConnectionFactory(true)).addChild(builder(MessagingExtension.EXTERNAL_JMS_QUEUE_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.EXTERNAL_JMS_TOPIC_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES)).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.ELYTRON_DOMAIN, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.CREDENTIAL_REFERENCE, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, 
ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.JOURNAL_DATASOURCE, ServerDefinition.JOURNAL_MESSAGES_TABLE, ServerDefinition.JOURNAL_BINDINGS_TABLE, ServerDefinition.JOURNAL_JMS_BINDINGS_TABLE, ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE, ServerDefinition.JOURNAL_PAGE_STORE_TABLE, ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE, ServerDefinition.JOURNAL_DATABASE, ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION, ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD, ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT, ServerDefinition.GLOBAL_MAX_DISK_USAGE, ServerDefinition.DISK_SCAN_PERIOD, ServerDefinition.GLOBAL_MAX_MEMORY_SIZE, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, 
ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(MessagingExtension.CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, 
HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, CommonAttributes.DURABLE, CommonAttributes.FILTER, QueueDefinition.ROUTING_TYPE)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, 
SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES, AddressSettingDefinition.AUTO_CREATE_QUEUES, AddressSettingDefinition.AUTO_DELETE_QUEUES, AddressSettingDefinition.AUTO_CREATE_ADDRESSES, AddressSettingDefinition.AUTO_DELETE_ADDRESSES)).addChild(httpConnector).addChild(remoteConnector).addChild(invmConnector).addChild(connector).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, 
CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JGROUPS_BROADCAST_GROUP_PATH).addAttributes(BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, BroadcastGroupDefinition.JGROUPS_CHANNEL, CommonAttributes.JGROUPS_CLUSTER, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(MessagingExtension.SOCKET_BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(jgroupDiscoveryGroup).addChild(socketDiscoveryGroup).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, ClusterConnectionDefinition.PRODUCER_WINDOW_SIZE, ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, 
GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.PRODUCER_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CREDENTIAL_REFERENCE, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, 
ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Common.DESERIALIZATION_BLACKLIST, ConnectionFactoryAttributes.Common.DESERIALIZATION_WHITELIST, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Regular.FACTORY_TYPE, 
ConnectionFactoryAttributes.Common.USE_TOPOLOGY)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, 
LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(createPooledConnectionFactory(false))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.SOURCE_CREDENTIAL_REFERENCE, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.TARGET_CREDENTIAL_REFERENCE, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
1312282.123422cassandra
/**
 * Verifies GROUP BY semantics over a table with a static column when results are fetched
 * through the native protocol with paging, for every page size from 1 to 9.
 * Phase 1: partitions contain ONLY static data (no regular rows), so clustering columns
 * come back null and count(b) is 0. Phase 2: regular rows are added, one static-only
 * partition (a = 3) is created, the static value of a = 4 is deleted, and partition a = 5
 * is fully deleted; the same query shapes are re-checked against the richer data set.
 */
public void testGroupByWithStaticColumnsWithPaging() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, s int static, d int, primary key (a, b, c))");
    // Static-only writes: these create partitions with no regular (clustering) rows.
    execute("UPDATE %s SET s = 1 WHERE a = 1");
    execute("UPDATE %s SET s = 2 WHERE a = 2");
    execute("UPDATE %s SET s = 3 WHERE a = 4");
    // Phase 1: every query below must return identical results regardless of page size.
    for (int pageSize = 1; pageSize < 10; pageSize++) {
        // Range queries over static-only partitions: b is null, count(b) == 0, count(s) == 1 per group.
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s", pageSize), row(1, null, 1, 0L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s GROUP BY a, b", pageSize), row(1, null, 1), row(2, null, 2), row(4, null, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b", pageSize), row(1, null, null, 1, null), row(2, null, null, 2, null), row(4, null, null, 3, null));
        // LIMIT / PER PARTITION LIMIT interaction with GROUP BY on static-only partitions.
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b LIMIT 2", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s LIMIT 2", pageSize), row(1, null, 1, 0L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a PER PARTITION LIMIT 2", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a PER PARTITION LIMIT 2 LIMIT 2", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L));
        // DISTINCT variants.
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s GROUP BY a", pageSize), row(1, 1, 1L), row(2, 2, 1L), row(4, 3, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s ", pageSize), row(1, 1, 3L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s GROUP BY a LIMIT 2", pageSize), row(1, 1, 1L), row(2, 2, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s LIMIT 2", pageSize), row(1, 1, 3L));
        // Single-partition queries (WHERE a = 1).
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a", pageSize), row(1, null, 1, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a, b", pageSize), row(1, null, 1, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1", pageSize), row(1, null, 1, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a = 1 GROUP BY a, b", pageSize), row(1, null, 1));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a, b", pageSize), row(1, null, null, 1, null));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a, b LIMIT 2", pageSize), row(1, null, 1, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 LIMIT 2", pageSize), row(1, null, 1, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s WHERE a = 1 GROUP BY a", pageSize), row(1, 1, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s WHERE a = 1", pageSize), row(1, 1, 1L));
        // Multi-partition IN queries (a = 3 does not exist yet, so only 1, 2, 4 return groups).
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4)", pageSize), row(1, null, 1, 0L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b", pageSize), row(1, null, 1), row(2, null, 2), row(4, null, 3));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b LIMIT 2", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) LIMIT 2", pageSize), row(1, null, 1, 0L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a PER PARTITION LIMIT 2", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a PER PARTITION LIMIT 2 LIMIT 2", pageSize), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a", pageSize), row(1, 1, 1L), row(2, 2, 1L), row(4, 3, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s WHERE a IN (1, 2, 3, 4)", pageSize), row(1, 1, 3L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2", pageSize), row(1, 1, 1L), row(2, 2, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(s) FROM %s WHERE a IN (1, 2, 3, 4) LIMIT 2", pageSize), row(1, 1, 3L));
    }
    // Phase 2 setup: a = 3 becomes static-only, a = 4 loses its static value but gains a row,
    // a = 1 and a = 2 get regular rows, a = 5 is inserted then fully deleted, and one row of
    // a = 1 is removed — exercising every combination of static/regular presence.
    execute("UPDATE %s SET s = 3 WHERE a = 3");
    execute("DELETE s FROM %s WHERE a = 4");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 2, 1, 3)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 2, 2, 6)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 3, 2, 12)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 4, 2, 12)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 4, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (2, 2, 3, 3)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (2, 4, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (4, 8, 2, 12)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (5, 8, 2, 12)");
    execute("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2");
    execute("DELETE FROM %s WHERE a = 5");
    // Phase 2: mixed static/regular data; again every page size must yield identical results.
    for (int pageSize = 1; pageSize < 10; pageSize++) {
        // Range queries: a = 4 contributes count(s) == 0 (static deleted); a = 3 contributes count(b) == 0 (static-only).
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a", pageSize), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L), row(4, 8, null, 1L, 0L), row(3, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b", pageSize), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(4, 8, null, 1L, 0L), row(3, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s", pageSize), row(1, 2, 1, 7L, 7L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE b = 2 GROUP BY a, b ALLOW FILTERING", pageSize), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE b = 2 ALLOW FILTERING", pageSize), row(1, 2, 1, 3L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s GROUP BY a", pageSize), row(1, 2, 1), row(2, 2, 2), row(4, 8, null), row(3, null, 3));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s GROUP BY a, b", pageSize), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(4, 8, null), row(3, null, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a", pageSize), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b", pageSize), row(1, 2, 1, 1, 3), row(1, 4, 2, 1, 12), row(2, 2, 3, 2, 3), row(2, 4, 3, 2, 6), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
        // LIMIT / PER PARTITION LIMIT with grouped and ungrouped selects.
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a LIMIT 2", pageSize), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s LIMIT 2", pageSize), row(1, 2, 1, 7L, 7L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s GROUP BY a LIMIT 2", pageSize), row(1, 2, 1), row(2, 2, 2));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s GROUP BY a, b LIMIT 10", pageSize), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(4, 8, null), row(3, null, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a LIMIT 2", pageSize), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b LIMIT 10", pageSize), row(1, 2, 1, 1, 3), row(1, 4, 2, 1, 12), row(2, 2, 3, 2, 3), row(2, 4, 3, 2, 6), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b PER PARTITION LIMIT 2", pageSize), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(4, 8, null, 1L, 0L), row(3, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b PER PARTITION LIMIT 1", pageSize), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(4, 8, null, 1L, 0L), row(3, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1", pageSize), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b PER PARTITION LIMIT 2 LIMIT 3", pageSize), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 3", pageSize), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(4, 8, null, 1L, 0L));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2", pageSize), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s GROUP BY a, b PER PARTITION LIMIT 1", pageSize), row(1, 2, 1), row(2, 2, 2), row(4, 8, null), row(3, null, 3));
        // DISTINCT over partitions with and without static values.
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s GROUP BY a", pageSize), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L), row(4, null, 1L, 0L), row(3, 3, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s", pageSize), row(1, 1, 4L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s GROUP BY a LIMIT 2", pageSize), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s LIMIT 2", pageSize), row(1, 1, 4L, 3L));
        // Single-partition queries covering regular-only, static-only, and mixed partitions.
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a", pageSize), row(1, 2, 1, 4L, 4L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 3 GROUP BY a, b", pageSize), row(3, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 3", pageSize), row(3, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 AND b = 2 GROUP BY a, b", pageSize), row(2, 2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 AND b = 2", pageSize), row(2, 2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a = 1 GROUP BY a", pageSize), row(1, 2, 1));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a = 4 GROUP BY a, b", pageSize), row(4, 8, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a", pageSize), row(1, 2, 1, 1, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 4 GROUP BY a, b", pageSize), row(4, 8, 2, null, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b LIMIT 1", pageSize), row(2, 2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 LIMIT 1", pageSize), row(2, 2, 2, 2L, 2L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a = 2 GROUP BY a, b LIMIT 1", pageSize), row(2, 2, 2));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a = 2 GROUP BY a, b LIMIT 2", pageSize), row(2, 2, 2), row(2, 4, 2));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a = 2 GROUP BY a", pageSize), row(2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a = 4 GROUP BY a", pageSize), row(4, null, 1L, 0L));
        // Reverse clustering order (ORDER BY ... DESC) within a single partition.
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC", pageSize), row(2, 4, 2, 1L, 1L), row(2, 2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 ORDER BY b DESC, c DESC", pageSize), row(2, 4, 2, 2L, 2L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 1", pageSize), row(2, 4, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 ORDER BY b DESC, c DESC LIMIT 2", pageSize), row(2, 4, 2, 2L, 2L));
        // Multi-partition IN queries (now all of 1, 2, 3, 4 exist).
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a", pageSize), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b", pageSize), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4)", pageSize), row(1, 2, 1, 7L, 7L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) AND b = 2 GROUP BY a, b", pageSize), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) AND b = 2", pageSize), row(1, 2, 1, 3L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a", pageSize), row(1, 2, 1), row(2, 2, 2), row(3, null, 3), row(4, 8, null));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b", pageSize), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(3, null, 3), row(4, 8, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a", pageSize), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3), row(3, null, null, 3, null), row(4, 8, 2, null, 12));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b", pageSize), row(1, 2, 1, 1, 3), row(1, 4, 2, 1, 12), row(2, 2, 3, 2, 3), row(2, 4, 3, 2, 6), row(3, null, null, 3, null), row(4, 8, 2, null, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2", pageSize), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) LIMIT 2", pageSize), row(1, 2, 1, 7L, 7L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2", pageSize), row(1, 2, 1), row(2, 2, 2));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b LIMIT 10", pageSize), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(3, null, 3), row(4, 8, null));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 1", pageSize), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 3", pageSize), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 3", pageSize), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(3, null, 3, 0L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 3 LIMIT 10", pageSize), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a", pageSize), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L), row(3, 3, 1L, 1L), row(4, null, 1L, 0L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a IN (1, 2, 3, 4)", pageSize), row(1, 1, 4L, 3L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2", pageSize), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a IN (1, 2, 3, 4) LIMIT 2", pageSize), row(1, 1, 4L, 3L));
    }
}
138463.8736298hadoop
 /**
  * Verifies entity retrieval with info, config, event and metric filters, covering
  * AND (default) and OR filter-list operators, EQUAL / NOT_EQUAL / comparison ops,
  * and filters referencing keys that do not exist on any entity.
  *
  * The original body repeated the query-then-verify boilerplate 18 times; it is now
  * factored into {@link #verifyFilteredEntities}, keeping the filter construction —
  * the actual subject under test — as the only per-case code.
  */
 void testGetFilteredEntities() throws Exception {
    // Single EQUAL info filter: only id_3 carries info2 == 3.5.
    TimelineFilterList infoFilterList = new TimelineFilterList();
    infoFilterList.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "info2", 3.5));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
        "Incorrect filtering based on info filters", "id_3");

    // Two EQUAL config filters ANDed together.
    TimelineFilterList confFilterList = new TimelineFilterList();
    confFilterList.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_1", "123"));
    confFilterList.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_3", "abc"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().configFilters(confFilterList).build(),
        "Incorrect filtering based on config filters", "id_3");

    // Both events must exist on the entity.
    TimelineFilterList eventFilters = new TimelineFilterList();
    eventFilters.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "event_2"));
    eventFilters.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "event_4"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().eventFilters(eventFilters).build(),
        "Incorrect filtering based on event filters", "id_3");

    // Numeric comparison on a metric value.
    TimelineFilterList metricFilterList = new TimelineFilterList();
    metricFilterList.addFilter(new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "metric3", 0L));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().metricFilters(metricFilterList).build(),
        "Incorrect filtering based on metric filters", "id_1", "id_2");

    // OR of two AND-sublists of config filters.
    TimelineFilterList list1 = new TimelineFilterList();
    list1.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_1", "129"));
    list1.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_3", "def"));
    TimelineFilterList list2 = new TimelineFilterList();
    list2.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_2", "23"));
    list2.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_3", "abc"));
    TimelineFilterList confFilterList1 = new TimelineFilterList(Operator.OR, list1, list2);
    verifyFilteredEntities(new TimelineEntityFilters.Builder().configFilters(confFilterList1).build(),
        "Incorrect filtering based on config filters", "id_1", "id_2");

    // OR combining NOT_EQUAL sublist with an EQUAL sublist.
    TimelineFilterList list3 = new TimelineFilterList();
    list3.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "config_1", "123"));
    list3.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "config_3", "abc"));
    TimelineFilterList list4 = new TimelineFilterList();
    list4.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_2", "23"));
    TimelineFilterList confFilterList2 = new TimelineFilterList(Operator.OR, list3, list4);
    verifyFilteredEntities(new TimelineEntityFilters.Builder().configFilters(confFilterList2).build(),
        "Incorrect filtering based on config filters", "id_1", "id_2");

    // ANDed NOT_EQUAL config filters.
    TimelineFilterList confFilterList3 = new TimelineFilterList();
    confFilterList3.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "config_1", "127"));
    confFilterList3.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "config_3", "abc"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().configFilters(confFilterList3).build(),
        "Incorrect filtering based on config filters", "id_2");

    // AND with a non-existent config key matches nothing.
    TimelineFilterList confFilterList4 = new TimelineFilterList();
    confFilterList4.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_dummy", "dummy"));
    confFilterList4.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_3", "def"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().configFilters(confFilterList4).build(),
        "Incorrect filtering based on config filters");

    // Same filters ORed: the config_3 == def branch matches id_2.
    TimelineFilterList confFilterList5 = new TimelineFilterList(Operator.OR);
    confFilterList5.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_dummy", "dummy"));
    confFilterList5.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_3", "def"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().configFilters(confFilterList5).build(),
        "Incorrect filtering based on config filters", "id_2");

    // OR of two AND-sublists of metric comparisons.
    TimelineFilterList list6 = new TimelineFilterList();
    list6.addFilter(new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "metric1", 200));
    list6.addFilter(new TimelineCompareFilter(TimelineCompareOp.EQUAL, "metric3", 23));
    TimelineFilterList list7 = new TimelineFilterList();
    list7.addFilter(new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "metric2", 74));
    TimelineFilterList metricFilterList1 = new TimelineFilterList(Operator.OR, list6, list7);
    verifyFilteredEntities(new TimelineEntityFilters.Builder().metricFilters(metricFilterList1).build(),
        "Incorrect filtering based on metric filters", "id_2", "id_3");

    // ANDed LESS_THAN / LESS_OR_EQUAL metric comparisons.
    TimelineFilterList metricFilterList2 = new TimelineFilterList();
    metricFilterList2.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "metric2", 70));
    metricFilterList2.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "metric3", 23));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().metricFilters(metricFilterList2).build(),
        "Incorrect filtering based on metric filters", "id_1");

    // AND with a non-existent metric matches nothing.
    TimelineFilterList metricFilterList3 = new TimelineFilterList();
    metricFilterList3.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "dummy_metric", 30));
    metricFilterList3.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "metric3", 23));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().metricFilters(metricFilterList3).build(),
        "Incorrect filtering based on metric filters");

    // Same filters ORed: the metric3 branch matches.
    TimelineFilterList metricFilterList4 = new TimelineFilterList(Operator.OR);
    metricFilterList4.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "dummy_metric", 30));
    metricFilterList4.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "metric3", 23));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().metricFilters(metricFilterList4).build(),
        "Incorrect filtering based on metric filters", "id_1", "id_2");

    // NOT_EQUAL excludes the entity whose metric2 is 74.
    TimelineFilterList metricFilterList5 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "metric2", 74));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().metricFilters(metricFilterList5).build(),
        "Incorrect filtering based on metric filters", "id_1", "id_2");

    // EQUAL AND NOT_EQUAL info filters that exclude everything.
    TimelineFilterList infoFilterList1 = new TimelineFilterList();
    infoFilterList1.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "info2", 3.5));
    infoFilterList1.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "info4", 20));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().infoFilters(infoFilterList1).build(),
        "Incorrect filtering based on info filters");

    // ORed info filters matching two distinct entities.
    TimelineFilterList infoFilterList2 = new TimelineFilterList(Operator.OR);
    infoFilterList2.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "info2", 3.5));
    infoFilterList2.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "info1", "val1"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().infoFilters(infoFilterList2).build(),
        "Incorrect filtering based on info filters", "id_1", "id_3");

    // AND with a non-existent info key matches nothing.
    TimelineFilterList infoFilterList3 = new TimelineFilterList();
    infoFilterList3.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "dummy_info", 1));
    infoFilterList3.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "info2", "val5"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().infoFilters(infoFilterList3).build(),
        "Incorrect filtering based on info filters");

    // Same filters ORed: the info2 == "val5" branch matches id_1.
    TimelineFilterList infoFilterList4 = new TimelineFilterList(Operator.OR);
    infoFilterList4.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "dummy_info", 1));
    infoFilterList4.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "info2", "val5"));
    verifyFilteredEntities(new TimelineEntityFilters.Builder().infoFilters(infoFilterList4).build(),
        "Incorrect filtering based on info filters", "id_1");
 }

 /**
  * Runs a getEntities query against the fixed test context
  * (cluster1/user1/flow1/1/app1/app) with the given filters and asserts that the
  * result contains exactly {@code expectedIds.length} entities, each of whose id
  * is one of {@code expectedIds}; otherwise fails with {@code failMsg}.
  *
  * @param filters     entity filters under test
  * @param failMsg     failure message reported when an unexpected entity id appears
  * @param expectedIds the ids allowed (and expected, by count) in the result
  */
 private void verifyFilteredEntities(TimelineEntityFilters filters, String failMsg, String... expectedIds) throws Exception {
    Set<TimelineEntity> result = reader.getEntities(
        new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1", "app", null),
        filters, new TimelineDataToRetrieve());
    assertEquals(expectedIds.length, result.size());
    for (TimelineEntity entity : result) {
        // Plain array scan avoids needing extra collection imports.
        boolean matched = false;
        for (String id : expectedIds) {
            if (id.equals(entity.getId())) {
                matched = true;
                break;
            }
        }
        if (!matched) {
            fail(failMsg);
        }
    }
 }
135834.8866224wildfly
/**
 * Deployment-processor pass that applies the interceptor bindings declared in the
 * {@code ejb-jar.xml} assembly descriptor to the EJB components of the deployment.
 * <p>
 * Three binding scopes are handled:
 * <ul>
 *   <li>default interceptors (ejb-name {@code *}) — applied to every component;</li>
 *   <li>class-level bindings — matched by EJB name (optionally interpreted as a regular
 *       expression when {@code ejbNameRegexService} allows it);</li>
 *   <li>method-level bindings — the target method is resolved against the component
 *       class via the deployment reflection index.</li>
 * </ul>
 * Bindings that declare a total (absolute) interceptor order replace, rather than
 * extend, the interceptor chain at their level.
 *
 * @param phaseContext the deployment phase context supplying the deployment unit
 * @throws DeploymentUnitProcessingException if a component class cannot be loaded, or a
 *         binding references a method that cannot be resolved or is ambiguous, or a
 *         default binding illegally specifies a method or an order, or two bindings both
 *         claim absolute ordering for the same class or method
 */
public void deploy(final DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    final EjbJarMetaData metaData = deploymentUnit.getAttachment(EjbDeploymentAttachmentKeys.EJB_JAR_METADATA);
    final EEModuleDescription eeModuleDescription = deploymentUnit.getAttachment(Attachments.EE_MODULE_DESCRIPTION);
    final Module module = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.MODULE);
    final DeploymentReflectionIndex index = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.REFLECTION_INDEX);
    // Nothing to do unless the descriptor actually declares interceptor bindings.
    if (metaData == null
            || metaData.getAssemblyDescriptor() == null
            || metaData.getAssemblyDescriptor().getInterceptorBindings() == null) {
        return;
    }
    // Interceptor classes explicitly declared in the <interceptors> section. Default
    // interceptor classes must appear here; otherwise they are skipped with a warning.
    final Set<String> interceptorClasses = new HashSet<>();
    if (metaData.getInterceptors() != null) {
        for (final InterceptorMetaData interceptor : metaData.getInterceptors()) {
            interceptorClasses.add(interceptor.getInterceptorClass());
        }
    }
    // Partition the bindings: "*" bindings are defaults, everything else is keyed by EJB name.
    final Map<String, List<InterceptorBindingMetaData>> bindingsPerComponent = new HashMap<>();
    final List<InterceptorBindingMetaData> defaultInterceptorBindings = new ArrayList<>();
    for (final InterceptorBindingMetaData binding : metaData.getAssemblyDescriptor().getInterceptorBindings()) {
        if (binding.getEjbName().equals("*")) {
            // The spec forbids method targets and explicit ordering on default bindings.
            if (binding.getMethod() != null) {
                throw EjbLogger.ROOT_LOGGER.defaultInterceptorsNotBindToMethod();
            }
            if (binding.getInterceptorOrder() != null) {
                throw EjbLogger.ROOT_LOGGER.defaultInterceptorsNotSpecifyOrder();
            }
            defaultInterceptorBindings.add(binding);
        } else if (ejbNameRegexService.isEjbNameRegexAllowed()) {
            // Treat the ejb-name as a regular expression and bind every matching component.
            final Pattern pattern = Pattern.compile(binding.getEjbName());
            for (final ComponentDescription componentDescription : eeModuleDescription.getComponentDescriptions()) {
                if (componentDescription instanceof EJBComponentDescription) {
                    final String ejbName = ((EJBComponentDescription) componentDescription).getEJBName();
                    if (pattern.matcher(ejbName).matches()) {
                        bindingsPerComponent.computeIfAbsent(ejbName, k -> new ArrayList<>()).add(binding);
                    }
                }
            }
        } else {
            bindingsPerComponent.computeIfAbsent(binding.getEjbName(), k -> new ArrayList<>()).add(binding);
        }
    }
    // Resolve default bindings into interceptor descriptions, warning on classes that
    // were not declared in the <interceptors> section.
    final List<InterceptorDescription> defaultInterceptors = new ArrayList<>();
    for (final InterceptorBindingMetaData binding : defaultInterceptorBindings) {
        if (binding.getInterceptorClasses() != null) {
            for (final String clazz : binding.getInterceptorClasses()) {
                if (interceptorClasses.contains(clazz)) {
                    defaultInterceptors.add(new InterceptorDescription(clazz));
                } else {
                    ROOT_LOGGER.defaultInterceptorClassNotListed(clazz);
                }
            }
        }
    }
    // Apply the collected bindings to every component in the module.
    for (final ComponentDescription componentDescription : eeModuleDescription.getComponentDescriptions()) {
        final Class<?> componentClass;
        try {
            componentClass = module.getClassLoader().loadClass(componentDescription.getComponentClassName());
        } catch (ClassNotFoundException e) {
            throw EjbLogger.ROOT_LOGGER.failToLoadComponentClass(e, componentDescription.getComponentClassName());
        }
        final List<InterceptorBindingMetaData> bindings = bindingsPerComponent.get(componentDescription.getComponentName());
        final Map<Method, List<InterceptorBindingMetaData>> methodInterceptors = new HashMap<>();
        final List<InterceptorBindingMetaData> classLevelBindings = new ArrayList<>();
        boolean classLevelExcludeDefaultInterceptors = false;
        final Map<Method, Boolean> methodLevelExcludeDefaultInterceptors = new HashMap<>();
        final Map<Method, Boolean> methodLevelExcludeClassInterceptors = new HashMap<>();
        boolean classLevelAbsoluteOrder = false;
        final Map<Method, Boolean> methodLevelAbsoluteOrder = new HashMap<>();
        if (bindings != null) {
            for (final InterceptorBindingMetaData binding : bindings) {
                if (binding.getMethod() == null) {
                    // Class-level binding for this component.
                    classLevelBindings.add(binding);
                    if (binding.isExcludeDefaultInterceptors()) {
                        classLevelExcludeDefaultInterceptors = true;
                    }
                    if (binding.isTotalOrdering()) {
                        // At most one binding may define the absolute order for a class.
                        if (classLevelAbsoluteOrder) {
                            throw EjbLogger.ROOT_LOGGER.twoEjbBindingsSpecifyAbsoluteOrder(componentClass.toString());
                        } else {
                            classLevelAbsoluteOrder = true;
                        }
                    }
                } else {
                    // Method-level binding: resolve the target method via the reflection index.
                    final NamedMethodMetaData methodData = binding.getMethod();
                    final ClassReflectionIndex classIndex = index.getClassIndex(componentClass);
                    Method resolvedMethod = null;
                    if (methodData.getMethodParams() == null) {
                        // No parameter list given: the name must match exactly one method.
                        final Collection<Method> methods = classIndex.getAllMethods(methodData.getMethodName());
                        if (methods.isEmpty()) {
                            throw EjbLogger.ROOT_LOGGER.failToFindMethodInEjbJarXml(componentClass.getName(), methodData.getMethodName());
                        } else if (methods.size() > 1) {
                            throw EjbLogger.ROOT_LOGGER.multipleMethodReferencedInEjbJarXml(methodData.getMethodName(), componentClass.getName());
                        }
                        resolvedMethod = methods.iterator().next();
                    } else {
                        // Match on name, arity and every declared parameter type name.
                        final Collection<Method> methods = classIndex.getAllMethods(methodData.getMethodName(), methodData.getMethodParams().size());
                        for (final Method method : methods) {
                            boolean match = true;
                            for (int i = 0; i < method.getParameterCount(); ++i) {
                                if (!method.getParameterTypes()[i].getName().equals(methodData.getMethodParams().get(i))) {
                                    match = false;
                                    break;
                                }
                            }
                            if (match) {
                                resolvedMethod = method;
                                break;
                            }
                        }
                        if (resolvedMethod == null) {
                            throw EjbLogger.ROOT_LOGGER.failToFindMethodWithParameterTypes(componentClass.getName(), methodData.getMethodName(), methodData.getMethodParams());
                        }
                    }
                    methodInterceptors.computeIfAbsent(resolvedMethod, k -> new ArrayList<>()).add(binding);
                    if (binding.isExcludeDefaultInterceptors()) {
                        methodLevelExcludeDefaultInterceptors.put(resolvedMethod, true);
                    }
                    if (binding.isExcludeClassInterceptors()) {
                        methodLevelExcludeClassInterceptors.put(resolvedMethod, true);
                    }
                    if (binding.isTotalOrdering()) {
                        // At most one binding may define the absolute order for a method.
                        if (methodLevelAbsoluteOrder.containsKey(resolvedMethod)) {
                            throw EjbLogger.ROOT_LOGGER.twoEjbBindingsSpecifyAbsoluteOrder(resolvedMethod.toString());
                        } else {
                            methodLevelAbsoluteOrder.put(resolvedMethod, true);
                        }
                    }
                }
            }
        }
        componentDescription.setDefaultInterceptors(defaultInterceptors);
        if (classLevelExcludeDefaultInterceptors) {
            componentDescription.setExcludeDefaultInterceptors(true);
        }
        final List<InterceptorDescription> classLevelInterceptors = new ArrayList<>();
        if (classLevelAbsoluteOrder) {
            // An absolute <interceptor-order> fully defines the class-level chain and
            // implicitly excludes the default interceptors.
            for (final InterceptorBindingMetaData binding : classLevelBindings) {
                if (binding.isTotalOrdering()) {
                    for (final String interceptor : binding.getInterceptorOrder()) {
                        classLevelInterceptors.add(new InterceptorDescription(interceptor));
                    }
                }
            }
            componentDescription.setExcludeDefaultInterceptors(true);
        } else {
            // Otherwise append descriptor bindings after annotation-defined interceptors.
            classLevelInterceptors.addAll(componentDescription.getClassInterceptors());
            for (final InterceptorBindingMetaData binding : classLevelBindings) {
                if (binding.getInterceptorClasses() != null) {
                    for (final String interceptor : binding.getInterceptorClasses()) {
                        classLevelInterceptors.add(new InterceptorDescription(interceptor));
                    }
                }
            }
        }
        componentDescription.setClassInterceptors(classLevelInterceptors);
        for (final Map.Entry<Method, List<InterceptorBindingMetaData>> entry : methodInterceptors.entrySet()) {
            final Method method = entry.getKey();
            final List<InterceptorBindingMetaData> methodBindings = entry.getValue();
            final boolean totalOrder = methodLevelAbsoluteOrder.containsKey(method);
            final MethodIdentifier methodIdentifier = MethodIdentifier.getIdentifierForMethod(method);
            // An exclusion may come from the descriptor binding itself or from annotations
            // already recorded on the component description.
            boolean excludeDefaultInterceptors = methodLevelExcludeDefaultInterceptors.getOrDefault(method, Boolean.FALSE);
            if (!excludeDefaultInterceptors) {
                excludeDefaultInterceptors = componentDescription.isExcludeDefaultInterceptors() || componentDescription.isExcludeDefaultInterceptors(methodIdentifier);
            }
            boolean excludeClassInterceptors = methodLevelExcludeClassInterceptors.getOrDefault(method, Boolean.FALSE);
            if (!excludeClassInterceptors) {
                excludeClassInterceptors = componentDescription.isExcludeClassInterceptors(methodIdentifier);
            }
            final List<InterceptorDescription> methodLevelInterceptors = new ArrayList<>();
            if (totalOrder) {
                // Absolute ordering: the <interceptor-order> element is the entire chain.
                for (final InterceptorBindingMetaData binding : methodBindings) {
                    if (binding.isTotalOrdering()) {
                        for (final String interceptor : binding.getInterceptorOrder()) {
                            methodLevelInterceptors.add(new InterceptorDescription(interceptor));
                        }
                    }
                }
            } else {
                // Build the chain in order: defaults, class-level, annotation method-level,
                // then the descriptor's method-level interceptors.
                if (!excludeDefaultInterceptors) {
                    methodLevelInterceptors.addAll(defaultInterceptors);
                }
                if (!excludeClassInterceptors) {
                    methodLevelInterceptors.addAll(classLevelInterceptors);
                }
                final List<InterceptorDescription> annotationMethodLevel = componentDescription.getMethodInterceptors().get(methodIdentifier);
                if (annotationMethodLevel != null) {
                    methodLevelInterceptors.addAll(annotationMethodLevel);
                }
                for (final InterceptorBindingMetaData binding : methodBindings) {
                    if (binding.getInterceptorClasses() != null) {
                        for (final String interceptor : binding.getInterceptorClasses()) {
                            methodLevelInterceptors.add(new InterceptorDescription(interceptor));
                        }
                    }
                }
            }
            // The computed list already accounts for exclusions, so mark both scopes as
            // excluded to prevent defaults/class interceptors from being re-applied later.
            componentDescription.excludeClassInterceptors(methodIdentifier);
            componentDescription.excludeDefaultInterceptors(methodIdentifier);
            componentDescription.setMethodInterceptors(methodIdentifier, methodLevelInterceptors);
        }
    }
}
139469.41499wildfly
/**
 * Builds the {@link PersistentResourceXMLDescription} used to parse and persist the
 * messaging subsystem XML for this schema version ({@code NAMESPACE}).
 * <p>
 * The single builder chain mirrors the XML structure: the subsystem root, a server
 * child with its persistence/journal/security/management attributes, the HA policy
 * variants (live-only, replication master/slave/colocated, shared-store
 * master/slave/colocated), directory paths, queues, security settings, address
 * settings, connectors/acceptors (HTTP, remote, in-VM, generic), broadcast and
 * discovery groups, cluster connections, grouping handlers, diverts, bridges,
 * connector services, JMS destinations, the connection-factory variants (regular,
 * legacy, pooled) and the JMS bridge.
 * <p>
 * NOTE(review): attribute order within each {@code addAttributes(...)} call is
 * significant for XML persistence — do not reorder when editing.
 *
 * @return the fully assembled XML description for this subsystem version
 */
public PersistentResourceXMLDescription getParserDescription() {
    return builder(MessagingExtension.SUBSYSTEM_PATH, NAMESPACE).addChild(builder(MessagingExtension.SERVER_PATH).addAttributes(ServerDefinition.PERSISTENCE_ENABLED, ServerDefinition.PERSIST_ID_CACHE, ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY, ServerDefinition.ID_CACHE_SIZE, ServerDefinition.PAGE_MAX_CONCURRENT_IO, ServerDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, ServerDefinition.THREAD_POOL_MAX_SIZE, ServerDefinition.WILD_CARD_ROUTING_ENABLED, ServerDefinition.CONNECTION_TTL_OVERRIDE, ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED, ServerDefinition.SECURITY_DOMAIN, ServerDefinition.SECURITY_ENABLED, ServerDefinition.SECURITY_INVALIDATION_INTERVAL, ServerDefinition.OVERRIDE_IN_VM_SECURITY, ServerDefinition.CLUSTER_USER, ServerDefinition.CLUSTER_PASSWORD, ServerDefinition.MANAGEMENT_ADDRESS, ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS, ServerDefinition.JMX_MANAGEMENT_ENABLED, ServerDefinition.JMX_DOMAIN, ServerDefinition.JOURNAL_TYPE, ServerDefinition.JOURNAL_BUFFER_TIMEOUT, ServerDefinition.JOURNAL_BUFFER_SIZE, ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL, ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL, ServerDefinition.LOG_JOURNAL_WRITE_RATE, ServerDefinition.JOURNAL_FILE_SIZE, ServerDefinition.JOURNAL_MIN_FILES, ServerDefinition.JOURNAL_POOL_FILES, ServerDefinition.JOURNAL_COMPACT_PERCENTAGE, ServerDefinition.JOURNAL_COMPACT_MIN_FILES, ServerDefinition.JOURNAL_MAX_IO, ServerDefinition.CREATE_BINDINGS_DIR, ServerDefinition.CREATE_JOURNAL_DIR, ServerDefinition.STATISTICS_ENABLED, ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD, ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY, ServerDefinition.TRANSACTION_TIMEOUT, ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD, ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY, ServerDefinition.PERF_BLAST_PAGES, ServerDefinition.RUN_SYNC_SPEED_TEST, ServerDefinition.SERVER_DUMP_INTERVAL, ServerDefinition.MEMORY_MEASURE_INTERVAL, 
ServerDefinition.MEMORY_WARNING_THRESHOLD, CommonAttributes.INCOMING_INTERCEPTORS, CommonAttributes.OUTGOING_INTERCEPTORS).addChild(builder(MessagingExtension.LIVE_ONLY_PATH).addAttributes(ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(REPLICATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(MessagingExtension.REPLICATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.REPLICATION_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET, HAAttributes.EXCLUDED_CONNECTORS).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.CHECK_FOR_LIVE_SERVER, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.CLUSTER_NAME, HAAttributes.GROUP_NAME, HAAttributes.ALLOW_FAILBACK, HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT, HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, 
ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(SHARED_STORE_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(SHARED_STORE_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS)).addChild(builder(MessagingExtension.SHARED_STORE_COLOCATED_PATH).addAttributes(HAAttributes.REQUEST_BACKUP, HAAttributes.BACKUP_REQUEST_RETRIES, HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL, HAAttributes.MAX_BACKUPS, HAAttributes.BACKUP_PORT_OFFSET).addChild(builder(CONFIGURATION_MASTER_PATH).addAttributes(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN)).addChild(builder(CONFIGURATION_SLAVE_PATH).addAttributes(HAAttributes.ALLOW_FAILBACK, HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN, HAAttributes.RESTART_BACKUP, ScaleDownAttributes.SCALE_DOWN, ScaleDownAttributes.SCALE_DOWN_CLUSTER_NAME, ScaleDownAttributes.SCALE_DOWN_GROUP_NAME, ScaleDownAttributes.SCALE_DOWN_DISCOVERY_GROUP, ScaleDownAttributes.SCALE_DOWN_CONNECTORS))).addChild(builder(MessagingExtension.BINDINGS_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.BINDINGS_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.JOURNAL_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.JOURNAL_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.LARGE_MESSAGES_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.LARGE_MESSAGES_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.PAGING_DIRECTORY_PATH).addAttributes(PathDefinition.PATHS.get(CommonAttributes.PAGING_DIRECTORY), PathDefinition.RELATIVE_TO)).addChild(builder(MessagingExtension.QUEUE_PATH).addAttributes(QueueDefinition.ADDRESS, 
CommonAttributes.DURABLE, CommonAttributes.FILTER)).addChild(builder(MessagingExtension.SECURITY_SETTING_PATH).addChild(builder(MessagingExtension.ROLE_PATH).addAttributes(SecurityRoleDefinition.SEND, SecurityRoleDefinition.CONSUME, SecurityRoleDefinition.CREATE_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_DURABLE_QUEUE, SecurityRoleDefinition.CREATE_NON_DURABLE_QUEUE, SecurityRoleDefinition.DELETE_NON_DURABLE_QUEUE, SecurityRoleDefinition.MANAGE))).addChild(builder(MessagingExtension.ADDRESS_SETTING_PATH).addAttributes(CommonAttributes.DEAD_LETTER_ADDRESS, CommonAttributes.EXPIRY_ADDRESS, AddressSettingDefinition.EXPIRY_DELAY, AddressSettingDefinition.REDELIVERY_DELAY, AddressSettingDefinition.REDELIVERY_MULTIPLIER, AddressSettingDefinition.MAX_DELIVERY_ATTEMPTS, AddressSettingDefinition.MAX_REDELIVERY_DELAY, AddressSettingDefinition.MAX_SIZE_BYTES, AddressSettingDefinition.PAGE_SIZE_BYTES, AddressSettingDefinition.PAGE_MAX_CACHE_SIZE, AddressSettingDefinition.ADDRESS_FULL_MESSAGE_POLICY, AddressSettingDefinition.MESSAGE_COUNTER_HISTORY_DAY_LIMIT, AddressSettingDefinition.LAST_VALUE_QUEUE, AddressSettingDefinition.REDISTRIBUTION_DELAY, AddressSettingDefinition.SEND_TO_DLA_ON_NO_ROUTE, AddressSettingDefinition.SLOW_CONSUMER_CHECK_PERIOD, AddressSettingDefinition.SLOW_CONSUMER_POLICY, AddressSettingDefinition.SLOW_CONSUMER_THRESHOLD, AddressSettingDefinition.AUTO_CREATE_JMS_QUEUES, AddressSettingDefinition.AUTO_DELETE_JMS_QUEUES)).addChild(builder(MessagingExtension.HTTP_CONNECTOR_PATH).addAttributes(HTTPConnectorDefinition.SOCKET_BINDING, HTTPConnectorDefinition.ENDPOINT, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_CONNECTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_CONNECTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(CONNECTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, 
CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.HTTP_ACCEPTOR_PATH).addAttributes(HTTPAcceptorDefinition.HTTP_LISTENER, HTTPAcceptorDefinition.UPGRADE_LEGACY, CommonAttributes.PARAMS)).addChild(builder(pathElement(REMOTE_ACCEPTOR)).addAttributes(RemoteTransportDefinition.SOCKET_BINDING, CommonAttributes.PARAMS)).addChild(builder(pathElement(IN_VM_ACCEPTOR)).addAttributes(InVMTransportDefinition.SERVER_ID, CommonAttributes.PARAMS)).addChild(builder(pathElement(ACCEPTOR)).addAttributes(GenericTransportDefinition.SOCKET_BINDING, CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.BROADCAST_GROUP_PATH).addAttributes(CommonAttributes.SOCKET_BINDING, BroadcastGroupDefinition.JGROUPS_CHANNEL_FACTORY, CommonAttributes.JGROUPS_CHANNEL, BroadcastGroupDefinition.BROADCAST_PERIOD, BroadcastGroupDefinition.CONNECTOR_REFS)).addChild(builder(DiscoveryGroupDefinition.PATH).addAttributes(CommonAttributes.SOCKET_BINDING, DiscoveryGroupDefinition.JGROUPS_CHANNEL_FACTORY, CommonAttributes.JGROUPS_CHANNEL, DiscoveryGroupDefinition.REFRESH_TIMEOUT, DiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT)).addChild(builder(MessagingExtension.CLUSTER_CONNECTION_PATH).addAttributes(ClusterConnectionDefinition.ADDRESS, ClusterConnectionDefinition.CONNECTOR_NAME, ClusterConnectionDefinition.CHECK_PERIOD, ClusterConnectionDefinition.CONNECTION_TTL, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CALL_TIMEOUT, ClusterConnectionDefinition.CALL_FAILOVER_TIMEOUT, ClusterConnectionDefinition.RETRY_INTERVAL, ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER, ClusterConnectionDefinition.MAX_RETRY_INTERVAL, ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS, ClusterConnectionDefinition.RECONNECT_ATTEMPTS, ClusterConnectionDefinition.USE_DUPLICATE_DETECTION, ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE, ClusterConnectionDefinition.MAX_HOPS, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, 
ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS, ClusterConnectionDefinition.NOTIFICATION_INTERVAL, ClusterConnectionDefinition.CONNECTOR_REFS, ClusterConnectionDefinition.ALLOW_DIRECT_CONNECTIONS_ONLY, ClusterConnectionDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.GROUPING_HANDLER_PATH).addAttributes(GroupingHandlerDefinition.TYPE, GroupingHandlerDefinition.GROUPING_HANDLER_ADDRESS, GroupingHandlerDefinition.TIMEOUT, GroupingHandlerDefinition.GROUP_TIMEOUT, GroupingHandlerDefinition.REAPER_PERIOD)).addChild(builder(DivertDefinition.PATH).addAttributes(DivertDefinition.ROUTING_NAME, DivertDefinition.ADDRESS, DivertDefinition.FORWARDING_ADDRESS, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, DivertDefinition.EXCLUSIVE)).addChild(builder(MessagingExtension.BRIDGE_PATH).addAttributes(BridgeDefinition.QUEUE_NAME, BridgeDefinition.FORWARDING_ADDRESS, CommonAttributes.HA, CommonAttributes.FILTER, CommonAttributes.TRANSFORMER_CLASS_NAME, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CHECK_PERIOD, CommonAttributes.CONNECTION_TTL, CommonAttributes.RETRY_INTERVAL, CommonAttributes.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, BridgeDefinition.INITIAL_CONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS, BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE, BridgeDefinition.USE_DUPLICATE_DETECTION, CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE, BridgeDefinition.USER, BridgeDefinition.PASSWORD, BridgeDefinition.CONNECTOR_REFS, BridgeDefinition.DISCOVERY_GROUP_NAME)).addChild(builder(MessagingExtension.CONNECTOR_SERVICE_PATH).addAttributes(CommonAttributes.FACTORY_CLASS, CommonAttributes.PARAMS)).addChild(builder(MessagingExtension.JMS_QUEUE_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, CommonAttributes.SELECTOR, CommonAttributes.DURABLE, CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.JMS_TOPIC_PATH).addAttributes(CommonAttributes.DESTINATION_ENTRIES, 
CommonAttributes.LEGACY_ENTRIES)).addChild(builder(MessagingExtension.CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.GROUP_ID, 
ConnectionFactoryAttributes.Regular.FACTORY_TYPE)).addChild(builder(MessagingExtension.LEGACY_CONNECTION_FACTORY_PATH).addAttributes(LegacyConnectionFactoryDefinition.ENTRIES, LegacyConnectionFactoryDefinition.DISCOVERY_GROUP, LegacyConnectionFactoryDefinition.CONNECTORS, LegacyConnectionFactoryDefinition.AUTO_GROUP, LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND, LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT, LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD, CommonAttributes.CLIENT_ID, LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES, LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE, LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME, LegacyConnectionFactoryDefinition.CONNECTION_TTL, LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE, LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE, LegacyConnectionFactoryDefinition.FACTORY_TYPE, LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION, LegacyConnectionFactoryDefinition.GROUP_ID, LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE, LegacyConnectionFactoryDefinition.HA, LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL, LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE, LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE, LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE, LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE, LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS, LegacyConnectionFactoryDefinition.RETRY_INTERVAL, LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER, LegacyConnectionFactoryDefinition.SCHEDULED_THREAD_POOL_MAX_SIZE, LegacyConnectionFactoryDefinition.THREAD_POOL_MAX_SIZE, 
LegacyConnectionFactoryDefinition.TRANSACTION_BATCH_SIZE, LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS)).addChild(builder(MessagingExtension.POOLED_CONNECTION_FACTORY_PATH).addAttributes(ConnectionFactoryAttributes.Common.ENTRIES, ConnectionFactoryAttributes.Common.DISCOVERY_GROUP, ConnectionFactoryAttributes.Common.CONNECTORS, CommonAttributes.HA, ConnectionFactoryAttributes.Common.CLIENT_FAILURE_CHECK_PERIOD, ConnectionFactoryAttributes.Common.CONNECTION_TTL, CommonAttributes.CALL_TIMEOUT, CommonAttributes.CALL_FAILOVER_TIMEOUT, ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE, ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE, ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE, ConnectionFactoryAttributes.Common.PROTOCOL_MANAGER_FACTORY, ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES, ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT, CommonAttributes.MIN_LARGE_MESSAGE_SIZE, CommonAttributes.CLIENT_ID, ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE, ConnectionFactoryAttributes.Common.TRANSACTION_BATCH_SIZE, ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND, ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND, ConnectionFactoryAttributes.Common.AUTO_GROUP, ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE, ConnectionFactoryAttributes.Common.RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RETRY_INTERVAL_MULTIPLIER, CommonAttributes.MAX_RETRY_INTERVAL, ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS, ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION, ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME, ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS, ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE, ConnectionFactoryAttributes.Common.THREAD_POOL_MAX_SIZE, 
ConnectionFactoryAttributes.Common.GROUP_ID, ConnectionFactoryAttributes.Pooled.USE_JNDI, ConnectionFactoryAttributes.Pooled.JNDI_PARAMS, ConnectionFactoryAttributes.Pooled.USE_LOCAL_TX, ConnectionFactoryAttributes.Pooled.SETUP_ATTEMPTS, ConnectionFactoryAttributes.Pooled.SETUP_INTERVAL, ConnectionFactoryAttributes.Pooled.TRANSACTION, ConnectionFactoryAttributes.Pooled.USER, ConnectionFactoryAttributes.Pooled.PASSWORD, ConnectionFactoryAttributes.Pooled.MIN_POOL_SIZE, ConnectionFactoryAttributes.Pooled.USE_AUTO_RECOVERY, ConnectionFactoryAttributes.Pooled.MAX_POOL_SIZE, ConnectionFactoryAttributes.Pooled.MANAGED_CONNECTION_POOL, ConnectionFactoryAttributes.Pooled.ENLISTMENT_TRACE, ConnectionFactoryAttributes.Common.INITIAL_MESSAGE_PACKET_SIZE, ConnectionFactoryAttributes.Pooled.INITIAL_CONNECT_ATTEMPTS))).addChild(builder(MessagingExtension.JMS_BRIDGE_PATH).addAttributes(JMSBridgeDefinition.MODULE, JMSBridgeDefinition.QUALITY_OF_SERVICE, JMSBridgeDefinition.FAILURE_RETRY_INTERVAL, JMSBridgeDefinition.MAX_RETRIES, JMSBridgeDefinition.MAX_BATCH_SIZE, JMSBridgeDefinition.MAX_BATCH_TIME, CommonAttributes.SELECTOR, JMSBridgeDefinition.SUBSCRIPTION_NAME, CommonAttributes.CLIENT_ID, JMSBridgeDefinition.ADD_MESSAGE_ID_IN_HEADER, JMSBridgeDefinition.SOURCE_CONNECTION_FACTORY, JMSBridgeDefinition.SOURCE_DESTINATION, JMSBridgeDefinition.SOURCE_USER, JMSBridgeDefinition.SOURCE_PASSWORD, JMSBridgeDefinition.TARGET_CONNECTION_FACTORY, JMSBridgeDefinition.TARGET_DESTINATION, JMSBridgeDefinition.TARGET_USER, JMSBridgeDefinition.TARGET_PASSWORD, JMSBridgeDefinition.SOURCE_CONTEXT, JMSBridgeDefinition.TARGET_CONTEXT)).build();
}
139225.2760219wildfly
/**
 * Exercises the full lifecycle of clustered EJB timers across two nodes: creation,
 * timeout counting, single-action semantics, behavior across node stop/start, and
 * cancellation.
 *
 * Protocol as used here (inferred from the HTTP verbs below — NOTE(review): confirm
 * against TimerServlet): PUT creates the timers, HEAD returns per-bean timer counts as
 * response headers keyed by bean class name, GET returns recorded timeout instants as
 * repeated headers, DELETE cancels the timers.
 *
 * @param baseURL1 deployment URL for {@code NODE_1}
 * @param baseURL2 deployment URL for {@code NODE_2}
 */
public void test(@ArquillianResource(TimerServlet.class) @OperateOnDeployment(DEPLOYMENT_1) URL baseURL1, @ArquillianResource(TimerServlet.class) @OperateOnDeployment(DEPLOYMENT_2) URL baseURL2) throws IOException, URISyntaxException {
    // Node name -> servlet URI; TreeMap gives a deterministic (sorted) iteration order.
    Map<String, URI> uris = new TreeMap<>();
    uris.put(NODE_1, TimerServlet.createURI(baseURL1, this.moduleName));
    uris.put(NODE_2, TimerServlet.createURI(baseURL2, this.moduleName));
    // Single-action timers are expected to fire exactly once and then disappear;
    // they are handled separately from the recurring timers below.
    List<Class<? extends TimerBean>> singleActionTimerBeanClasses = List.of(SingleActionPersistentTimerBean.class, SingleActionTransientTimerBean.class);
    try (CloseableHttpClient client = TestHttpClientUtils.promiscuousCookieHttpClient()) {
        TimeUnit.SECONDS.sleep(2);
        // Create all timers via NODE_1 only.
        try (CloseableHttpResponse response = client.execute(new HttpPut(uris.get(NODE_1)))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        }
        // Verify timer counts on both nodes: every bean should report one timer,
        // except manually-created transient timers, which only exist on the node
        // that created them (NODE_1), so NODE_2 reports zero for those.
        for (Map.Entry<String, URI> entry : uris.entrySet()) {
            try (CloseableHttpResponse response = client.execute(new HttpHead(entry.getValue()))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                for (Class<? extends TimerBean> beanClass : TimerServlet.TIMER_CLASSES) {
                    int count = Integer.parseInt(response.getFirstHeader(beanClass.getName()).getValue());
                    if (TimerServlet.MANUAL_TRANSIENT_TIMER_CLASSES.contains(beanClass) && entry.getKey().equals(NODE_2)) {
                        Assert.assertEquals(entry.getKey() + ": " + beanClass.getName(), 0, count);
                    } else {
                        Assert.assertEquals(entry.getKey() + ": " + beanClass.getName(), 1, count);
                    }
                }
            }
        }
        // Let the timers fire, then collect the recorded timeout instants per bean
        // class and per node. IdentityHashMap is keyed by Class, for which identity
        // equality is sufficient.
        TimeUnit.SECONDS.sleep(2);
        Map<Class<? extends TimerBean>, Map<String, List<Instant>>> timeouts = new IdentityHashMap<>();
        for (Class<? extends TimerBean> beanClass : TimerServlet.TIMER_CLASSES) {
            timeouts.put(beanClass, new TreeMap<>());
        }
        for (Map.Entry<String, URI> entry : uris.entrySet()) {
            try (CloseableHttpResponse response = client.execute(new HttpGet(entry.getValue()))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
                    beanEntry.getValue().put(entry.getKey(), parseTimeouts(response.getHeaders(beanEntry.getKey().getName())));
                }
            }
        }
        // Single-action timers must have fired exactly once, and only on the node
        // that created them (NODE_1). They are removed from the map here so the
        // recurring-timer checks below don't see them.
        for (Class<? extends TimerBean> beanClass : singleActionTimerBeanClasses) {
            Map<String, List<Instant>> singleActionTimeouts = timeouts.remove(beanClass);
            Assert.assertEquals(singleActionTimeouts.toString(), 1, singleActionTimeouts.get(NODE_1).size());
            Assert.assertEquals(singleActionTimeouts.toString(), 0, singleActionTimeouts.get(NODE_2).size());
        }
        // Recurring timers: manual timers fire only on NODE_1; auto transient timers
        // fire on both nodes (each node has its own); any other timer must fire on
        // at least one node.
        for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
            if (ManualTimerBean.class.isAssignableFrom(beanEntry.getKey())) {
                Assert.assertFalse(beanEntry.toString(), beanEntry.getValue().get(NODE_1).isEmpty());
                Assert.assertTrue(beanEntry.toString(), beanEntry.getValue().get(NODE_2).isEmpty());
            } else if (AutoTransientTimerBean.class.equals(beanEntry.getKey())) {
                Assert.assertFalse(beanEntry.toString(), beanEntry.getValue().get(NODE_1).isEmpty());
                Assert.assertFalse(beanEntry.toString(), beanEntry.getValue().get(NODE_2).isEmpty());
            } else {
                Assert.assertTrue(beanEntry.toString(), !beanEntry.getValue().get(NODE_1).isEmpty() || !beanEntry.getValue().get(NODE_2).isEmpty());
            }
        }
        // Re-check counts: single-action timers are gone everywhere now that they
        // have fired; manual transient timers still only exist on NODE_1.
        for (Map.Entry<String, URI> entry : uris.entrySet()) {
            try (CloseableHttpResponse response = client.execute(new HttpHead(entry.getValue()))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                for (Class<? extends TimerBean> beanClass : TimerServlet.TIMER_CLASSES) {
                    int count = Integer.parseInt(response.getFirstHeader(beanClass.getName()).getValue());
                    if (TimerServlet.SINGLE_ACTION_TIMER_CLASSES.contains(beanClass) || (TimerServlet.MANUAL_TRANSIENT_TIMER_CLASSES.contains(beanClass) && entry.getKey().equals(NODE_2))) {
                        Assert.assertEquals(entry.getKey() + ": " + beanClass.getName(), 0, count);
                    } else {
                        Assert.assertEquals(entry.getKey() + ": " + beanClass.getName(), 1, count);
                    }
                }
            }
        }
        // Collect a fresh round of timeouts, re-adding the single-action classes so
        // we can assert they do NOT fire a second time.
        TimeUnit.SECONDS.sleep(2);
        for (Map<String, List<Instant>> beanTimeouts : timeouts.values()) {
            beanTimeouts.clear();
        }
        for (Class<? extends TimerBean> singleActionTimerBeanClass : singleActionTimerBeanClasses) {
            timeouts.put(singleActionTimerBeanClass, new TreeMap<>());
        }
        for (Map.Entry<String, URI> entry : uris.entrySet()) {
            try (CloseableHttpResponse response = client.execute(new HttpGet(entry.getValue()))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
                    beanEntry.getValue().put(entry.getKey(), parseTimeouts(response.getHeaders(beanEntry.getKey().getName())));
                }
            }
        }
        // Single-action timers must not fire again on either node.
        for (Class<? extends TimerBean> singleActionTimerBeanClass : singleActionTimerBeanClasses) {
            Map<String, List<Instant>> singleActionTimers = timeouts.remove(singleActionTimerBeanClass);
            Assert.assertEquals(singleActionTimers.toString(), 0, singleActionTimers.get(NODE_1).size());
            Assert.assertEquals(singleActionTimers.toString(), 0, singleActionTimers.get(NODE_2).size());
        }
        // Recurring timers keep firing with the same node distribution as before.
        for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
            if (ManualTimerBean.class.isAssignableFrom(beanEntry.getKey())) {
                Assert.assertFalse(beanEntry.toString(), beanEntry.getValue().get(NODE_1).isEmpty());
                Assert.assertTrue(beanEntry.toString(), beanEntry.getValue().get(NODE_2).isEmpty());
            } else {
                Assert.assertTrue(beanEntry.toString(), !beanEntry.getValue().get(NODE_1).isEmpty() || !beanEntry.getValue().get(NODE_2).isEmpty());
            }
        }
        // Stop NODE_1: the surviving node should report one timer for every bean
        // except single-action (already expired) and manual transient (lost with
        // the node that created them).
        this.stop(NODE_1);
        TimeUnit.SECONDS.sleep(2);
        try (CloseableHttpResponse response = client.execute(new HttpHead(uris.get(NODE_2)))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            for (Class<? extends TimerBean> beanClass : TimerServlet.TIMER_CLASSES) {
                int count = Integer.parseInt(response.getFirstHeader(beanClass.getName()).getValue());
                if (TimerServlet.SINGLE_ACTION_TIMER_CLASSES.contains(beanClass) || TimerServlet.MANUAL_TRANSIENT_TIMER_CLASSES.contains(beanClass)) {
                    Assert.assertEquals(beanClass.getName(), 0, count);
                } else {
                    Assert.assertEquals(beanClass.getName(), 1, count);
                }
            }
        }
        for (Map<String, List<Instant>> beanTimeouts : timeouts.values()) {
            beanTimeouts.clear();
        }
        // With NODE_1 down, persistent and auto timers must continue firing on
        // NODE_2; everything else must be silent there.
        try (CloseableHttpResponse response = client.execute(new HttpGet(uris.get(NODE_2)))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
                beanEntry.getValue().put(NODE_2, parseTimeouts(response.getHeaders(beanEntry.getKey().getName())));
            }
        }
        for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> entry : timeouts.entrySet()) {
            if (TimerServlet.PERSISTENT_TIMER_CLASSES.contains(entry.getKey()) || AutoTimerBean.class.isAssignableFrom(entry.getKey())) {
                Assert.assertNotEquals(entry.toString(), 0, entry.getValue().get(NODE_2).size());
            } else {
                Assert.assertEquals(entry.toString(), 0, entry.getValue().get(NODE_2).size());
            }
        }
        // Restart NODE_1: auto transient timers are recreated on both nodes; other
        // transient timers stay gone; the rest must fire on at least one node.
        this.start(NODE_1);
        TimeUnit.SECONDS.sleep(2);
        for (Map<String, List<Instant>> beanTimeouts : timeouts.values()) {
            beanTimeouts.clear();
        }
        for (Map.Entry<String, URI> entry : uris.entrySet()) {
            try (CloseableHttpResponse response = client.execute(new HttpGet(entry.getValue()))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
                    beanEntry.getValue().put(entry.getKey(), parseTimeouts(response.getHeaders(beanEntry.getKey().getName())));
                }
            }
        }
        for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> entry : timeouts.entrySet()) {
            if (AutoTransientTimerBean.class.equals(entry.getKey())) {
                Assert.assertFalse(entry.toString(), entry.getValue().get(NODE_1).isEmpty());
                Assert.assertFalse(entry.toString(), entry.getValue().get(NODE_2).isEmpty());
            } else if (TimerServlet.TRANSIENT_TIMER_CLASSES.contains(entry.getKey())) {
                Assert.assertTrue(entry.toString(), entry.getValue().get(NODE_1).isEmpty());
                Assert.assertTrue(entry.toString(), entry.getValue().get(NODE_2).isEmpty());
            } else {
                Assert.assertFalse(entry.toString(), entry.getValue().get(NODE_1).isEmpty() && entry.getValue().get(NODE_2).isEmpty());
            }
        }
        // Second observation window after the restart: now each remaining timer
        // must fire on exactly one node (XOR), i.e. no duplicate execution once the
        // cluster has settled.
        TimeUnit.SECONDS.sleep(2);
        for (Map<String, List<Instant>> beanTimeouts : timeouts.values()) {
            beanTimeouts.clear();
        }
        for (Map.Entry<String, URI> entry : uris.entrySet()) {
            try (CloseableHttpResponse response = client.execute(new HttpGet(entry.getValue()))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
                    beanEntry.getValue().put(entry.getKey(), parseTimeouts(response.getHeaders(beanEntry.getKey().getName())));
                }
            }
        }
        for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> entry : timeouts.entrySet()) {
            if (AutoTransientTimerBean.class.equals(entry.getKey())) {
                Assert.assertFalse(entry.toString(), entry.getValue().get(NODE_1).isEmpty());
                Assert.assertFalse(entry.toString(), entry.getValue().get(NODE_2).isEmpty());
            } else if (TimerServlet.TRANSIENT_TIMER_CLASSES.contains(entry.getKey())) {
                Assert.assertTrue(entry.toString(), entry.getValue().get(NODE_1).isEmpty());
                Assert.assertTrue(entry.toString(), entry.getValue().get(NODE_2).isEmpty());
            } else {
                // XOR: fires on exactly one of the two nodes.
                Assert.assertTrue(entry.toString(), entry.getValue().get(NODE_1).isEmpty() ^ entry.getValue().get(NODE_2).isEmpty());
            }
        }
        // Stop NODE_2: mirror of the earlier failover check, this time observing
        // NODE_1 — persistent and auto timers keep firing there.
        this.stop(NODE_2);
        TimeUnit.SECONDS.sleep(2);
        for (Map<String, List<Instant>> beanTimeouts : timeouts.values()) {
            beanTimeouts.clear();
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(uris.get(NODE_1)))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
                beanEntry.getValue().put(NODE_1, parseTimeouts(response.getHeaders(beanEntry.getKey().getName())));
            }
        }
        for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> entry : timeouts.entrySet()) {
            if (TimerServlet.PERSISTENT_TIMER_CLASSES.contains(entry.getKey()) || AutoTimerBean.class.isAssignableFrom(entry.getKey())) {
                Assert.assertNotEquals(entry.toString(), 0, entry.getValue().get(NODE_1).size());
            } else {
                Assert.assertEquals(entry.toString(), 0, entry.getValue().get(NODE_1).size());
            }
        }
        // Restart NODE_2, then cancel the timers via NODE_1 and record the
        // cancellation instant for the final check.
        this.start(NODE_2);
        TimeUnit.SECONDS.sleep(2);
        try (CloseableHttpResponse response = client.execute(new HttpDelete(uris.get(NODE_1)))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        }
        Instant cancellation = Instant.now();
        TimeUnit.SECONDS.sleep(2);
        for (Map<String, List<Instant>> beanTimeouts : timeouts.values()) {
            beanTimeouts.clear();
        }
        for (Map.Entry<String, URI> entry : uris.entrySet()) {
            try (CloseableHttpResponse response = client.execute(new HttpGet(entry.getValue()))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> beanEntry : timeouts.entrySet()) {
                    beanEntry.getValue().put(entry.getKey(), parseTimeouts(response.getHeaders(beanEntry.getKey().getName())));
                }
            }
        }
        // After cancellation: manual timers must record no timeout later than the
        // cancellation instant; auto timers (not cancellable here) must still be
        // firing on at least one node.
        // NOTE(review): the else-branch assertion does not depend on `node`, so it
        // is evaluated redundantly once per node — harmless but could be hoisted.
        for (Map.Entry<Class<? extends TimerBean>, Map<String, List<Instant>>> entry : timeouts.entrySet()) {
            for (String node : uris.keySet()) {
                if (ManualTimerBean.class.isAssignableFrom(entry.getKey())) {
                    Assert.assertTrue(cancellation + " " + entry.toString(), entry.getValue().get(node).stream().allMatch(instant -> instant.isBefore(cancellation)));
                } else {
                    Assert.assertTrue(entry.toString(), !entry.getValue().get(NODE_1).isEmpty() || !entry.getValue().get(NODE_2).isEmpty());
                }
            }
        }
    } catch (InterruptedException e) {
        // Re-interrupt per convention. NOTE(review): an interrupt aborts the test
        // body silently, so remaining assertions are skipped and the test passes.
        Thread.currentThread().interrupt();
    }
}
1313054.6420340wildfly
public void test(@ArquillianResource(SessionOperationServlet.class) @OperateOnDeployment(DEPLOYMENT_1) URL baseURL1, @ArquillianResource(SessionOperationServlet.class) @OperateOnDeployment(DEPLOYMENT_2) URL baseURL2) throws IOException, URISyntaxException, InterruptedException {
    String sessionId;
    try (CloseableHttpClient client = TestHttpClientUtils.promiscuousCookieHttpClient()) {
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createSetURI(baseURL1, "a")))) {
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            sessionId = response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue();
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.CREATED_SESSIONS).getValue());
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createSetURI(baseURL1, "a", "1")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.ADDED_ATTRIBUTES).getValue());
            Assert.assertEquals("1", response.getFirstHeader(SessionOperationServlet.BOUND_ATTRIBUTES).getValue());
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL2, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertEquals("1", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createRemoveURI(baseURL2, "b")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createSetURI(baseURL1, "a", "2")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.REPLACED_ATTRIBUTES).getValue());
            Assert.assertEquals("2", response.getFirstHeader(SessionOperationServlet.BOUND_ATTRIBUTES).getValue());
            Assert.assertEquals("1", response.getFirstHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES).getValue());
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL2, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertEquals("2", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createSetURI(baseURL1, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.REMOVED_ATTRIBUTES).getValue());
            Assert.assertEquals("2", response.getFirstHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES).getValue());
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL2, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createSetURI(baseURL1, "a", "3", "4")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.ADDED_ATTRIBUTES).getValue());
            Assert.assertEquals("3", response.getFirstHeader(SessionOperationServlet.BOUND_ATTRIBUTES).getValue());
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL2, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertEquals("4", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createRemoveURI(baseURL1, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.REMOVED_ATTRIBUTES).getValue());
            Assert.assertEquals("4", response.getFirstHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES).getValue());
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL2, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createSetURI(baseURL2, "a", "5")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.ADDED_ATTRIBUTES).getValue());
            Assert.assertEquals("5", response.getFirstHeader(SessionOperationServlet.BOUND_ATTRIBUTES).getValue());
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createInvalidateURI(baseURL1)))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals(response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue(), response.getFirstHeader(SessionOperationServlet.DESTROYED_SESSIONS).getValue());
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.REMOVED_ATTRIBUTES).getValue());
            Assert.assertEquals("5", response.getFirstHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES).getValue());
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createSetURI(baseURL2, "a", "6")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            sessionId = response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue();
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            Assert.assertEquals(response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue(), response.getFirstHeader(SessionOperationServlet.CREATED_SESSIONS).getValue());
            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.ADDED_ATTRIBUTES).getValue());
            Assert.assertEquals("6", response.getFirstHeader(SessionOperationServlet.BOUND_ATTRIBUTES).getValue());
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetAndSetURI(baseURL2, "a", "7")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertEquals("6", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL2, "a")))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertEquals("7", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
        }
        if (!this.transactional) {
            Thread.sleep(AbstractClusteringTestCase.GRACE_TIME_TO_REPLICATE);
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createTimeoutURI(baseURL1, 1)))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.RESULT));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
            Assert.assertFalse(response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
            sessionId = response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue();
        }
        Instant start = Instant.now();
        while (Instant.now().isBefore(start.plus(EXPIRATION_DURATION))) {
            try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL1, "a")))) {
                Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
            }
            Thread.sleep(100);
        }
    }
    Thread.sleep(EXPIRATION_DURATION.toMillis());
    try (CloseableHttpClient client = TestHttpClientUtils.promiscuousCookieHttpClient()) {
        boolean destroyed = false;
        String newSessionId = null;
        int maxAttempts = 30;
        for (int attempt = 1; attempt <= maxAttempts && !destroyed; attempt++) {
            for (URL baseURL : Arrays.asList(baseURL1, baseURL2)) {
                if (!destroyed) {
                    try (CloseableHttpResponse response = client.execute(new HttpGet(SessionOperationServlet.createGetURI(baseURL, "a", sessionId)))) {
                        Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                        Assert.assertFalse(response.containsHeader(SessionOperationServlet.RESULT));
                        Assert.assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                        Assert.assertEquals(newSessionId == null, response.containsHeader(SessionOperationServlet.CREATED_SESSIONS));
                        if (newSessionId == null) {
                            newSessionId = response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue();
                        } else {
                            Assert.assertEquals(newSessionId, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
                        }
                        destroyed = response.containsHeader(SessionOperationServlet.DESTROYED_SESSIONS);
                        Assert.assertFalse(response.containsHeader(SessionOperationServlet.ADDED_ATTRIBUTES));
                        Assert.assertFalse(response.containsHeader(SessionOperationServlet.REPLACED_ATTRIBUTES));
                        Assert.assertEquals(destroyed, response.containsHeader(SessionOperationServlet.REMOVED_ATTRIBUTES));
                        Assert.assertFalse(response.containsHeader(SessionOperationServlet.BOUND_ATTRIBUTES));
                        Assert.assertEquals(destroyed, response.containsHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES));
                        if (destroyed) {
                            Assert.assertEquals(sessionId, response.getFirstHeader(SessionOperationServlet.DESTROYED_SESSIONS).getValue());
                            Assert.assertEquals("a", response.getFirstHeader(SessionOperationServlet.REMOVED_ATTRIBUTES).getValue());
                            Assert.assertEquals("7", response.getFirstHeader(SessionOperationServlet.UNBOUND_ATTRIBUTES).getValue());
                            log.infof("Session destroyed within %d attempts.", attempt);
                        }
                    }
                }
            }
            Thread.sleep(TimeUnit.SECONDS.toMillis(1));
        }
        Assert.assertTrue("Session has not been destroyed following expiration within " + maxAttempts + " attempts.", destroyed);
    }
}
1411480.01405cassandra
/**
 * Verifies multi-column (tuple) slice restrictions in WHERE clauses against a table
 * whose clustering columns use mixed sort directions (b DESC, c ASC, d DESC, e ASC).
 * Each assertRows(...) call checks both the filtered row set and the exact row
 * ordering implied by the CLUSTERING ORDER.
 *
 * NOTE(review): byte-identical duplicate INSERTs for (0,1,1,0,-1)/(0,1,1,0,0) and
 * three byte-identical duplicate assertRows calls were removed from the original;
 * as exact repeats (Cassandra INSERTs are idempotent upserts) they added no coverage.
 */
public void testMixedOrderColumns1() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY (a, b, c, d, e)) WITH " + " CLUSTERING ORDER BY (b DESC, c ASC, d DESC, e ASC)");
    // Populate a single partition (a = 0) with clustering values spanning -1..2 so
    // that every slice bound below has rows on both sides of it.
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 2, 0, -1, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 2, 0, -1, 1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 2, 0, 1, 1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, -1, 0, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, -1, 1, 1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, -1, 1, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 0, 1, -1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 0, 1, 1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 0, 0, -1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 0, 0, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 0, 0, 1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 0, -1, -1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 1, 0, -1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 1, 0, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 1, 0, 1);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 1, 1, -1, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, 0, 0, 0, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, -1, 0, -1, 0);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, -1, 0, 0, 0);
    // Mixed upper/lower bounds where the two tuple relations have different lengths.
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d,e)<=(?,?,?,?) " + "AND (b)>(?)", 0, 2, 0, 1, 1, -1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d,e)<=(?,?,?,?) " + "AND (b)>=(?)", 0, 2, 0, 1, 1, -1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d)>=(?,?,?)" + "AND (b,c,d,e)<(?,?,?,?) ", 0, 1, 1, 0, 1, 1, 0, 1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d,e)>(?,?,?,?)" + "AND (b,c,d)<=(?,?,?) ", 0, -1, 0, -1, -1, 2, 0, -1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    // Full-length tuple bounds on both sides.
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d,e) < (?,?,?,?) " + "AND (b,c,d,e)>(?,?,?,?)", 0, 1, 0, 0, 0, 1, 0, -1, -1), row(0, 1, 0, 0, -1));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d,e) <= (?,?,?,?) " + "AND (b,c,d,e)>(?,?,?,?)", 0, 1, 0, 0, 0, 1, 0, -1, -1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b)<(?) " + "AND (b,c,d,e)>(?,?,?,?)", 0, 2, -1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    // Single-column tuple bounds on both sides (strict vs. inclusive lower bound).
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b)<(?) " + "AND (b)>(?)", 0, 2, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b)<(?) " + "AND (b)>=(?)", 0, 2, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d,e)<=(?,?,?,?) " + "AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, 1, 1, -1, 0, -1, -1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c)<=(?,?) " + "AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, -1, 0, -1, -1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d)<=(?,?,?) " + "AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, -1, -1, 0, -1, -1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    // Same restrictions with the relation order flipped in the WHERE clause.
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c,d,e)<(?,?,?,?) " + "AND (b,c,d)>=(?,?,?)", 0, 1, 1, 0, 1, 1, 1, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0));
    assertRows(execute("SELECT * FROM %s" + " WHERE a = ? " + "AND (b,c)<(?,?) " + "AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, -1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    // Single-sided tuple bounds of varying lengths.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) <= (?,?,?,?)", 0, 1, 0, 0, 0), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, -1, -1), row(0, 0, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) > (?,?,?,?)", 0, 1, 0, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, 1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) >= (?,?,?,?)", 0, 1, 0, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d) >= (?,?,?)", 0, 1, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d) > (?,?,?)", 0, 1, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0));
}
143682.2185178elasticsearch
 /**
  * Resolves a date-format name or custom pattern string to a {@link DateFormatter}.
  * <p>
  * The input is trimmed, then matched against every built-in format name via
  * {@code FormatNames.*.matches(input)}; the first match wins and its cached,
  * pre-built formatter constant is returned. If no built-in name matches, the
  * input is treated as a raw {@link java.time.format.DateTimeFormatterBuilder}
  * pattern and compiled with {@code Locale.ROOT} and STRICT resolution.
  *
  * @param input a built-in format name (e.g. {@code "basic_date_time"}) or a
  *              custom date pattern; must be non-null and non-blank after trimming
  * @return the formatter for the given name or pattern
  * @throws IllegalArgumentException if {@code input} is null/empty or is not a
  *         known name and fails to compile as a pattern (the original parse
  *         failure is preserved as the cause)
  */
 static DateFormatter forPattern(String input) {
    // Trim only when non-empty; a null input falls through to the check below.
    if (Strings.hasLength(input)) {
        input = input.trim();
    }
    if (input == null || input.isEmpty()) {
        throw new IllegalArgumentException("No date pattern provided");
    }
    // Dispatch on built-in format names. NOTE(review): this relies on
    // FormatNames.matches() semantics (presumably matching both camelCase and
    // snake_case aliases — confirm in FormatNames), which is why this cannot be
    // collapsed into a plain switch on the string.
    if (FormatNames.ISO8601.matches(input)) {
        return ISO_8601;
    } else if (FormatNames.BASIC_DATE.matches(input)) {
        return BASIC_DATE;
    } else if (FormatNames.BASIC_DATE_TIME.matches(input)) {
        return BASIC_DATE_TIME;
    } else if (FormatNames.BASIC_DATE_TIME_NO_MILLIS.matches(input)) {
        return BASIC_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.BASIC_ORDINAL_DATE.matches(input)) {
        return BASIC_ORDINAL_DATE;
    } else if (FormatNames.BASIC_ORDINAL_DATE_TIME.matches(input)) {
        return BASIC_ORDINAL_DATE_TIME;
    } else if (FormatNames.BASIC_ORDINAL_DATE_TIME_NO_MILLIS.matches(input)) {
        return BASIC_ORDINAL_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.BASIC_TIME.matches(input)) {
        return BASIC_TIME;
    } else if (FormatNames.BASIC_TIME_NO_MILLIS.matches(input)) {
        return BASIC_TIME_NO_MILLIS;
    } else if (FormatNames.BASIC_T_TIME.matches(input)) {
        return BASIC_T_TIME;
    } else if (FormatNames.BASIC_T_TIME_NO_MILLIS.matches(input)) {
        return BASIC_T_TIME_NO_MILLIS;
    } else if (FormatNames.BASIC_WEEK_DATE.matches(input)) {
        return BASIC_WEEK_DATE;
    } else if (FormatNames.BASIC_WEEK_DATE_TIME.matches(input)) {
        return BASIC_WEEK_DATE_TIME;
    } else if (FormatNames.BASIC_WEEK_DATE_TIME_NO_MILLIS.matches(input)) {
        return BASIC_WEEK_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.DATE.matches(input)) {
        return DATE;
    } else if (FormatNames.DATE_HOUR.matches(input)) {
        return DATE_HOUR;
    } else if (FormatNames.DATE_HOUR_MINUTE.matches(input)) {
        return DATE_HOUR_MINUTE;
    } else if (FormatNames.DATE_HOUR_MINUTE_SECOND.matches(input)) {
        return DATE_HOUR_MINUTE_SECOND;
    } else if (FormatNames.DATE_HOUR_MINUTE_SECOND_FRACTION.matches(input)) {
        return DATE_HOUR_MINUTE_SECOND_FRACTION;
    } else if (FormatNames.DATE_HOUR_MINUTE_SECOND_MILLIS.matches(input)) {
        return DATE_HOUR_MINUTE_SECOND_MILLIS;
    } else if (FormatNames.DATE_OPTIONAL_TIME.matches(input)) {
        return DATE_OPTIONAL_TIME;
    } else if (FormatNames.DATE_TIME.matches(input)) {
        return DATE_TIME;
    } else if (FormatNames.DATE_TIME_NO_MILLIS.matches(input)) {
        return DATE_TIME_NO_MILLIS;
    } else if (FormatNames.HOUR.matches(input)) {
        return HOUR;
    } else if (FormatNames.HOUR_MINUTE.matches(input)) {
        return HOUR_MINUTE;
    } else if (FormatNames.HOUR_MINUTE_SECOND.matches(input)) {
        return HOUR_MINUTE_SECOND;
    } else if (FormatNames.HOUR_MINUTE_SECOND_FRACTION.matches(input)) {
        return HOUR_MINUTE_SECOND_FRACTION;
    } else if (FormatNames.HOUR_MINUTE_SECOND_MILLIS.matches(input)) {
        return HOUR_MINUTE_SECOND_MILLIS;
    } else if (FormatNames.ORDINAL_DATE.matches(input)) {
        return ORDINAL_DATE;
    } else if (FormatNames.ORDINAL_DATE_TIME.matches(input)) {
        return ORDINAL_DATE_TIME;
    } else if (FormatNames.ORDINAL_DATE_TIME_NO_MILLIS.matches(input)) {
        return ORDINAL_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.TIME.matches(input)) {
        return TIME;
    } else if (FormatNames.TIME_NO_MILLIS.matches(input)) {
        return TIME_NO_MILLIS;
    } else if (FormatNames.T_TIME.matches(input)) {
        return T_TIME;
    } else if (FormatNames.T_TIME_NO_MILLIS.matches(input)) {
        return T_TIME_NO_MILLIS;
    } else if (FormatNames.WEEK_DATE.matches(input)) {
        return WEEK_DATE;
    } else if (FormatNames.WEEK_DATE_TIME.matches(input)) {
        return WEEK_DATE_TIME;
    } else if (FormatNames.WEEK_DATE_TIME_NO_MILLIS.matches(input)) {
        return WEEK_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.WEEKYEAR.matches(input)) {
        return WEEKYEAR;
    // NOTE(review): name WEEK_YEAR_WEEK vs constant WEEKYEAR_WEEK — naming is
    // inconsistent with the surrounding WEEKYEAR_* entries but maps correctly.
    } else if (FormatNames.WEEK_YEAR_WEEK.matches(input)) {
        return WEEKYEAR_WEEK;
    } else if (FormatNames.WEEKYEAR_WEEK_DAY.matches(input)) {
        return WEEKYEAR_WEEK_DAY;
    } else if (FormatNames.YEAR.matches(input)) {
        return YEAR;
    } else if (FormatNames.YEAR_MONTH.matches(input)) {
        return YEAR_MONTH;
    } else if (FormatNames.YEAR_MONTH_DAY.matches(input)) {
        return YEAR_MONTH_DAY;
    // Epoch formats use dedicated formatter implementations rather than patterns.
    } else if (FormatNames.EPOCH_SECOND.matches(input)) {
        return EpochTime.SECONDS_FORMATTER;
    } else if (FormatNames.EPOCH_MILLIS.matches(input)) {
        return EpochTime.MILLIS_FORMATTER;
    // "strict_*" variants: stricter parsing counterparts of the names above.
    } else if (FormatNames.STRICT_BASIC_WEEK_DATE.matches(input)) {
        return STRICT_BASIC_WEEK_DATE;
    } else if (FormatNames.STRICT_BASIC_WEEK_DATE_TIME.matches(input)) {
        return STRICT_BASIC_WEEK_DATE_TIME;
    } else if (FormatNames.STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS.matches(input)) {
        return STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.STRICT_DATE.matches(input)) {
        return STRICT_DATE;
    } else if (FormatNames.STRICT_DATE_HOUR.matches(input)) {
        return STRICT_DATE_HOUR;
    } else if (FormatNames.STRICT_DATE_HOUR_MINUTE.matches(input)) {
        return STRICT_DATE_HOUR_MINUTE;
    } else if (FormatNames.STRICT_DATE_HOUR_MINUTE_SECOND.matches(input)) {
        return STRICT_DATE_HOUR_MINUTE_SECOND;
    } else if (FormatNames.STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION.matches(input)) {
        return STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION;
    } else if (FormatNames.STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS.matches(input)) {
        return STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS;
    } else if (FormatNames.STRICT_DATE_OPTIONAL_TIME.matches(input)) {
        return STRICT_DATE_OPTIONAL_TIME;
    } else if (FormatNames.STRICT_DATE_OPTIONAL_TIME_NANOS.matches(input)) {
        return STRICT_DATE_OPTIONAL_TIME_NANOS;
    } else if (FormatNames.STRICT_DATE_TIME.matches(input)) {
        return STRICT_DATE_TIME;
    } else if (FormatNames.STRICT_DATE_TIME_NO_MILLIS.matches(input)) {
        return STRICT_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.STRICT_HOUR.matches(input)) {
        return STRICT_HOUR;
    } else if (FormatNames.STRICT_HOUR_MINUTE.matches(input)) {
        return STRICT_HOUR_MINUTE;
    } else if (FormatNames.STRICT_HOUR_MINUTE_SECOND.matches(input)) {
        return STRICT_HOUR_MINUTE_SECOND;
    } else if (FormatNames.STRICT_HOUR_MINUTE_SECOND_FRACTION.matches(input)) {
        return STRICT_HOUR_MINUTE_SECOND_FRACTION;
    } else if (FormatNames.STRICT_HOUR_MINUTE_SECOND_MILLIS.matches(input)) {
        return STRICT_HOUR_MINUTE_SECOND_MILLIS;
    } else if (FormatNames.STRICT_ORDINAL_DATE.matches(input)) {
        return STRICT_ORDINAL_DATE;
    } else if (FormatNames.STRICT_ORDINAL_DATE_TIME.matches(input)) {
        return STRICT_ORDINAL_DATE_TIME;
    } else if (FormatNames.STRICT_ORDINAL_DATE_TIME_NO_MILLIS.matches(input)) {
        return STRICT_ORDINAL_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.STRICT_TIME.matches(input)) {
        return STRICT_TIME;
    } else if (FormatNames.STRICT_TIME_NO_MILLIS.matches(input)) {
        return STRICT_TIME_NO_MILLIS;
    } else if (FormatNames.STRICT_T_TIME.matches(input)) {
        return STRICT_T_TIME;
    } else if (FormatNames.STRICT_T_TIME_NO_MILLIS.matches(input)) {
        return STRICT_T_TIME_NO_MILLIS;
    } else if (FormatNames.STRICT_WEEK_DATE.matches(input)) {
        return STRICT_WEEK_DATE;
    } else if (FormatNames.STRICT_WEEK_DATE_TIME.matches(input)) {
        return STRICT_WEEK_DATE_TIME;
    } else if (FormatNames.STRICT_WEEK_DATE_TIME_NO_MILLIS.matches(input)) {
        return STRICT_WEEK_DATE_TIME_NO_MILLIS;
    } else if (FormatNames.STRICT_WEEKYEAR.matches(input)) {
        return STRICT_WEEKYEAR;
    } else if (FormatNames.STRICT_WEEKYEAR_WEEK.matches(input)) {
        return STRICT_WEEKYEAR_WEEK;
    } else if (FormatNames.STRICT_WEEKYEAR_WEEK_DAY.matches(input)) {
        return STRICT_WEEKYEAR_WEEK_DAY;
    } else if (FormatNames.STRICT_YEAR.matches(input)) {
        return STRICT_YEAR;
    } else if (FormatNames.STRICT_YEAR_MONTH.matches(input)) {
        return STRICT_YEAR_MONTH;
    } else if (FormatNames.STRICT_YEAR_MONTH_DAY.matches(input)) {
        return STRICT_YEAR_MONTH_DAY;
    } else {
        // Not a known name: compile the input as a raw pattern. Locale.ROOT keeps
        // parsing locale-independent; STRICT resolution rejects out-of-range fields.
        try {
            return newDateFormatter(input, new DateTimeFormatterBuilder().appendPattern(input).toFormatter(Locale.ROOT).withResolverStyle(ResolverStyle.STRICT));
        } catch (IllegalArgumentException | ClassCastException e) {
            // Wrap with the offending input for context; keep the original cause.
            throw new IllegalArgumentException("Invalid format: [" + input + "]: " + e.getMessage(), e);
        }
    }
}
1411189.356367elasticsearch
/**
 * Exercises {@link SearchRequest#validate()} across option combinations that are mutually
 * exclusive or constrained: scroll vs. track_total_hits/from/size/search_after/collapse/rescore,
 * point-in-time vs. scroll/indices/indicesOptions/routing/preference, stored_fields vs.
 * _source/fields, and the [rank] feature's many incompatibilities. Each sub-block builds one
 * request and asserts either that validation passes or that exactly the expected single error
 * is reported.
 */
public void testValidate() throws IOException {
    {
        // A plain request with an empty source and no scroll is valid.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.scroll((Scroll) null);
        assertNull(searchRequest.validate());
    }
    {
        // Disabling track_total_hits is rejected in a scroll context.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.requestCache(false);
        searchRequest.scroll(new TimeValue(1000));
        searchRequest.source().trackTotalHits(false);
        assertSingleValidationError(searchRequest, "disabling [track_total_hits] is not allowed in a scroll context");
    }
    {
        // [from] is rejected in a scroll context.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.requestCache(false);
        searchRequest.scroll(new TimeValue(1000));
        searchRequest.source().from(10);
        assertSingleValidationError(searchRequest, "using [from] is not allowed in a scroll context");
    }
    {
        // [size] of 0 is rejected in a scroll context.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder().size(0));
        searchRequest.requestCache(false);
        searchRequest.scroll(new TimeValue(1000));
        assertSingleValidationError(searchRequest, "[size] cannot be [0] in a scroll context");
    }
    {
        // [search_after] is rejected in a scroll context.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.requestCache(false);
        searchRequest.scroll(new TimeValue(1000));
        searchRequest.source().searchAfter(new String[] { "value" });
        assertSingleValidationError(searchRequest, "[search_after] cannot be used in a scroll context");
    }
    {
        // [collapse] is rejected in a scroll context.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.requestCache(false);
        searchRequest.scroll(new TimeValue(1000));
        searchRequest.source().collapse(new CollapseBuilder("field"));
        assertSingleValidationError(searchRequest, "cannot use `collapse` in a scroll context");
    }
    {
        // [search_after] requires [from] to stay at 0.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.scroll((Scroll) null);
        searchRequest.source().searchAfter(new String[] { "value" });
        searchRequest.source().from(10);
        assertSingleValidationError(searchRequest, "[from] parameter must be set to 0 when [search_after] is used");
    }
    {
        // [slice] needs a scroll or point-in-time context.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.scroll((Scroll) null);
        searchRequest.source().pointInTimeBuilder(null);
        searchRequest.source().slice(new SliceBuilder(1, 10));
        assertSingleValidationError(searchRequest, "[slice] can only be used with [scroll] or [point-in-time] requests");
    }
    {
        // stored_fields cannot be disabled while _source is requested.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.scroll((Scroll) null);
        searchRequest.source().storedField("_none_");
        searchRequest.source().fetchSource(true);
        assertSingleValidationError(searchRequest, "[stored_fields] cannot be disabled if [_source] is requested");
    }
    {
        // stored_fields cannot be disabled while the [fields] option is used.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.scroll((Scroll) null);
        searchRequest.source().storedField("_none_");
        searchRequest.source().fetchSource(false);
        searchRequest.source().fetchField("field");
        assertSingleValidationError(searchRequest, "[stored_fields] cannot be disabled when using the [fields] option");
    }
    {
        // [sub_searches] is only legal together with [rank].
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        if (searchRequest.scroll() != null) {
            searchRequest.requestCache(false);
        }
        searchRequest.source()
            .subSearches(
                List.of(
                    new SubSearchSourceBuilder(new TermQueryBuilder("three", "four")),
                    new SubSearchSourceBuilder(new TermQueryBuilder("five", "six"))
                )
            );
        assertSingleValidationError(searchRequest, "[sub_searches] requires [rank]");
    }
    {
        // [rescore] is rejected in a scroll context.
        SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder());
        searchRequest.source().addRescorer(new QueryRescorerBuilder(QueryBuilders.matchAllQuery()));
        searchRequest.requestCache(false);
        searchRequest.scroll(new TimeValue(1000));
        assertSingleValidationError(searchRequest, "using [rescore] is not allowed in a scroll context");
    }
    {
        // Point-in-time is rejected in a scroll context.
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().pointInTimeBuilder(
                new PointInTimeBuilder(new BytesArray(Base64.getUrlEncoder().encode("id".getBytes(StandardCharsets.UTF_8))))
            )
        ).scroll(TimeValue.timeValueMillis(randomIntBetween(1, 100)));
        assertSingleValidationError(searchRequest, "using [point in time] is not allowed in a scroll context");
    }
    {
        // ccs_minimize_roundtrips is only invalid when combined with a minimum compatible shard version.
        SearchRequest searchRequest;
        boolean isMinCompatibleShardVersion = randomBoolean();
        if (isMinCompatibleShardVersion) {
            searchRequest = new SearchRequest(VersionUtils.randomVersion(random()));
        } else {
            searchRequest = new SearchRequest();
        }
        boolean shouldSetCcsMinimizeRoundtrips = randomBoolean();
        if (shouldSetCcsMinimizeRoundtrips) {
            searchRequest.setCcsMinimizeRoundtrips(true);
        }
        if (isMinCompatibleShardVersion && shouldSetCcsMinimizeRoundtrips) {
            assertSingleValidationError(
                searchRequest,
                "[ccs_minimize_roundtrips] cannot be [true] when setting a minimum compatible shard version"
            );
        } else {
            assertNull(searchRequest.validate());
        }
    }
    {
        // [rank] requires a positive [size].
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .size(0)
        );
        assertSingleValidationError(searchRequest, "[rank] requires [size] greater than [0]");
    }
    {
        // rank_window_size must be at least the requested size.
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(1))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .size(2)
        );
        assertSingleValidationError(searchRequest, "[rank] requires [rank_window_size: 1] be greater than or equal to [size: 2]");
    }
    {
        // [rank] needs at least two result sets (sub searches and/or knn searches).
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100)).query(QueryBuilders.termQuery("field", "term"))
        );
        assertSingleValidationError(
            searchRequest,
            "[rank] requires a minimum of [2] result sets using a combination of sub searches and/or knn searches"
        );
    }
    {
        // [rank] is rejected in a scroll context.
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
        ).scroll(new TimeValue(1000));
        assertSingleValidationError(searchRequest, "[rank] cannot be used in a scroll context");
    }
    {
        // rank_window_size below the (default) size of 10 is rejected.
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(9))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
        );
        assertSingleValidationError(searchRequest, "[rank] requires [rank_window_size: 9] be greater than or equal to [size: 10]");
    }
    {
        // rank_window_size equal to size is valid regardless of [from].
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(3))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .size(3)
                .from(4)
        );
        assertNull(searchRequest.validate());
    }
    {
        // [rank] cannot be combined with [rescore].
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .addRescorer(new QueryRescorerBuilder(QueryBuilders.termQuery("rescore", "another term")))
        );
        assertSingleValidationError(searchRequest, "[rank] cannot be used with [rescore]");
    }
    {
        // [rank] cannot be combined with [sort].
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .sort("test")
        );
        assertSingleValidationError(searchRequest, "[rank] cannot be used with [sort]");
    }
    {
        // [rank] cannot be combined with [collapse].
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .collapse(new CollapseBuilder("field"))
        );
        assertSingleValidationError(searchRequest, "[rank] cannot be used with [collapse]");
    }
    {
        // [rank] cannot be combined with [suggest].
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .suggest(new SuggestBuilder().setGlobalText("test").addSuggestion("suggestion", new TermSuggestionBuilder("term")))
        );
        assertSingleValidationError(searchRequest, "[rank] cannot be used with [suggest]");
    }
    {
        // [rank] cannot be combined with [highlighter].
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .highlighter(new HighlightBuilder().field("field"))
        );
        assertSingleValidationError(searchRequest, "[rank] cannot be used with [highlighter]");
    }
    {
        // [rank] cannot be combined with a point in time.
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("test")))
        );
        assertSingleValidationError(searchRequest, "[rank] cannot be used with [point in time]");
    }
    {
        // [rank] requires profiling to be disabled.
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .profile(true)
        );
        assertSingleValidationError(searchRequest, "[rank] requires [profile] is [false]");
    }
    {
        // [rank] requires explain to be disabled.
        SearchRequest searchRequest = new SearchRequest().source(
            new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100))
                .query(QueryBuilders.termQuery("field", "term"))
                .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null)))
                .explain(true)
        );
        assertSingleValidationError(searchRequest, "[rank] requires [explain] is [false]");
    }
    {
        // Explicit indices cannot be combined with a point in time.
        SearchRequest searchRequest = new SearchRequest("test").source(
            new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY))
        );
        assertSingleValidationError(
            searchRequest,
            "[indices] cannot be used with point in time. Do not specify any index with point in time."
        );
    }
    {
        // Non-default indicesOptions cannot be combined with a point in time.
        SearchRequest searchRequest = new SearchRequest().indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED)
            .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY)));
        assertSingleValidationError(searchRequest, "[indicesOptions] cannot be used with point in time");
    }
    {
        // [routing] cannot be combined with a point in time.
        SearchRequest searchRequest = new SearchRequest().routing("route1")
            .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY)));
        assertSingleValidationError(searchRequest, "[routing] cannot be used with point in time");
    }
    {
        // [preference] cannot be combined with a point in time.
        SearchRequest searchRequest = new SearchRequest().preference("pref1")
            .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY)));
        assertSingleValidationError(searchRequest, "[preference] cannot be used with point in time");
    }
}

/**
 * Validates {@code searchRequest} and asserts that exactly one validation error equal to
 * {@code expectedError} is produced. The size assertion includes the error list as the
 * failure message so a mismatch shows every reported error.
 */
private void assertSingleValidationError(SearchRequest searchRequest, String expectedError) {
    ActionRequestValidationException validationErrors = searchRequest.validate();
    assertNotNull(validationErrors);
    assertEquals(validationErrors.validationErrors().toString(), 1, validationErrors.validationErrors().size());
    assertEquals(expectedError, validationErrors.validationErrors().get(0));
}
144547.2520424elasticsearch
protected ClusterStateUpdateStats mutateInstance(ClusterStateUpdateStats instance) {
    switch(between(1, 19)) {
        case 1:
            return new ClusterStateUpdateStats(not(instance.getUnchangedTaskCount()), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 2:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), not(instance.getPublicationSuccessCount()), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 3:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), not(instance.getPublicationFailureCount()), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 4:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), not(instance.getUnchangedComputationElapsedMillis()), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 5:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), not(instance.getUnchangedNotificationElapsedMillis()), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 6:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), not(instance.getSuccessfulComputationElapsedMillis()), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 7:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), not(instance.getSuccessfulPublicationElapsedMillis()), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 8:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), not(instance.getSuccessfulContextConstructionElapsedMillis()), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 9:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), not(instance.getSuccessfulCommitElapsedMillis()), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 10:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), not(instance.getSuccessfulCompletionElapsedMillis()), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 11:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), not(instance.getSuccessfulMasterApplyElapsedMillis()), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 12:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), not(instance.getSuccessfulNotificationElapsedMillis()), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 13:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), not(instance.getFailedComputationElapsedMillis()), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 14:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), not(instance.getFailedPublicationElapsedMillis()), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 15:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), not(instance.getFailedContextConstructionElapsedMillis()), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 16:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), not(instance.getFailedCommitElapsedMillis()), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 17:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), not(instance.getFailedCompletionElapsedMillis()), instance.getFailedMasterApplyElapsedMillis(), instance.getFailedNotificationElapsedMillis());
        case 18:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), not(instance.getFailedMasterApplyElapsedMillis()), instance.getFailedNotificationElapsedMillis());
        case 19:
            return new ClusterStateUpdateStats(instance.getUnchangedTaskCount(), instance.getPublicationSuccessCount(), instance.getPublicationFailureCount(), instance.getUnchangedComputationElapsedMillis(), instance.getUnchangedNotificationElapsedMillis(), instance.getSuccessfulComputationElapsedMillis(), instance.getSuccessfulPublicationElapsedMillis(), instance.getSuccessfulContextConstructionElapsedMillis(), instance.getSuccessfulCommitElapsedMillis(), instance.getSuccessfulCompletionElapsedMillis(), instance.getSuccessfulMasterApplyElapsedMillis(), instance.getSuccessfulNotificationElapsedMillis(), instance.getFailedComputationElapsedMillis(), instance.getFailedPublicationElapsedMillis(), instance.getFailedContextConstructionElapsedMillis(), instance.getFailedCommitElapsedMillis(), instance.getFailedCompletionElapsedMillis(), instance.getFailedMasterApplyElapsedMillis(), not(instance.getFailedNotificationElapsedMillis()));
    }
    throw new AssertionError("impossible");
}
1410896.769354elasticsearch
/**
 * Shared test driver for {@code DestinationIndex.createDestinationIndex}.
 * <p>
 * Mocks two source indices ({@code index_1}, {@code index_2}) whose settings differ, optionally
 * injecting a deliberate conflict selected by {@code expectedError} (similarity, analysis filter,
 * or analysis analyzer). For a conflicting case it asserts the create call fails with a message
 * matching the expected regex and returns {@code null}; for {@link ExpectedError#NONE} it verifies
 * the captured {@link CreateIndexRequest} carries the merged settings and mappings, then returns
 * the parsed destination-index mappings so callers can make further assertions.
 *
 * @param analysis      the analysis type used to build the {@link DataFrameAnalyticsConfig}
 * @param expectedError which (if any) settings conflict to inject between the two source indices
 * @return the parsed mappings map of the created index, or {@code null} when a failure was expected
 * @throws IOException if parsing the captured mappings JSON fails
 */
private Map<String, Object> testCreateDestinationIndex(DataFrameAnalysis analysis, ExpectedError expectedError) throws IOException {
    DataFrameAnalyticsConfig config = createConfig(analysis);
    // Capture the CreateIndexRequest the code under test issues; answer with a successful (null) response.
    ArgumentCaptor<CreateIndexRequest> createIndexRequestCaptor = ArgumentCaptor.forClass(CreateIndexRequest.class);
    doAnswer(callListenerOnResponse(null)).when(client).execute(eq(TransportCreateIndexAction.TYPE), createIndexRequestCaptor.capture(), any());
    // Analysis settings for index_1. When INDEX_ANALYSIS_FILTER is requested, the en-stem-filter type
    // is set to a bogus value ("foobarbaz") so it conflicts with index_2's "stemmer"; when
    // INDEX_ANALYSIS_ANALYZER is requested, iq_text_delimiter's filter list omits "asciifolding" to
    // conflict with index_2's analyzer definition.
    Map<String, Object> analysisSettings1 = Map.ofEntries(Map.entry("index.analysis.filter.bigram_joiner.max_shingle_size", "2"), Map.entry("index.analysis.filter.bigram_joiner.output_unigrams", "false"), Map.entry("index.analysis.filter.bigram_joiner.token_separator", ""), Map.entry("index.analysis.filter.bigram_joiner.type", "shingle"), Map.entry("index.analysis.filter.bigram_max_size.max", "16"), Map.entry("index.analysis.filter.bigram_max_size.min", "0"), Map.entry("index.analysis.filter.bigram_max_size.type", "length"), Map.entry("index.analysis.filter.en-stem-filter.name", "light_english"), Map.entry("index.analysis.filter.en-stem-filter.type", (expectedError == ExpectedError.INDEX_ANALYSIS_FILTER) ? "foobarbaz" : "stemmer"), Map.entry("index.analysis.filter.bigram_joiner_unigrams.max_shingle_size", "2"), Map.entry("index.analysis.filter.bigram_joiner_unigrams.output_unigrams", "true"), Map.entry("index.analysis.filter.bigram_joiner_unigrams.token_separator", ""), Map.entry("index.analysis.filter.bigram_joiner_unigrams.type", "shingle"), Map.entry("index.analysis.filter.en-stop-words-filter.stopwords", "_english_"), Map.entry("index.analysis.filter.en-stop-words-filter.type", "stop"), Map.entry("index.analysis.analyzer.i_prefix.filter", List.of("cjk_width", "lowercase", "asciifolding", "front_ngram")), Map.entry("index.analysis.analyzer.i_prefix.tokenizer", "standard"), Map.entry("index.analysis.analyzer.iq_text_delimiter.filter", (expectedError == ExpectedError.INDEX_ANALYSIS_ANALYZER) ? 
List.of("delimiter", "cjk_width", "lowercase", "en-stop-words-filter", "en-stem-filter") : List.of("delimiter", "cjk_width", "lowercase", "asciifolding", "en-stop-words-filter", "en-stem-filter")), Map.entry("index.analysis.analyzer.iq_text_delimiter.tokenizer", "whitespace"), Map.entry("index.analysis.analyzer.q_prefix.filter", List.of("cjk_width", "lowercase", "asciifolding")), Map.entry("index.analysis.analyzer.q_prefix.tokenizer", "standard"), Map.entry("index.analysis.analyzer.iq_text_base.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stop-words-filter")), Map.entry("index.analysis.analyzer.iq_text_base.tokenizer", "standard"), Map.entry("index.analysis.analyzer.iq_text_stem.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stop-words-filter", "en-stem-filter")), Map.entry("index.analysis.analyzer.iq_text_stem.tokenizer", "standard"), Map.entry("index.analysis.analyzer.i_text_bigram.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stem-filter", "bigram_joiner", "bigram_max_size")), Map.entry("index.analysis.analyzer.i_text_bigram.tokenizer", "standard"), Map.entry("index.analysis.analyzer.q_text_bigram.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stem-filter", "bigram_joiner_unigrams", "bigram_max_size")), Map.entry("index.analysis.analyzer.q_text_bigram.tokenizer", "standard"));
    // index_1: 1 shard / 0 replicas, smaller mapping limits, explicit bm25 similarity.
    Settings.Builder index1SettingsBuilder = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put("index.mapping.total_fields.limit", 1000).put("index.mapping.depth.limit", 20).put("index.mapping.nested_fields.limit", 50).put("index.mapping.nested_objects.limit", 10000).put("index.mapping.field_name_length.limit", Long.MAX_VALUE).put("index.mapping.dimension_fields.limit", 16).put("index.similarity.default", "bm25");
    index1SettingsBuilder.loadFromMap(analysisSettings1);
    Settings index1Settings = index1SettingsBuilder.build();
    // Analysis settings for index_2 — same family of filters/analyzers (no error injection here),
    // plus a front_ngram filter that index_1 lacks and no i_prefix analyzer.
    Map<String, Object> analysisSettings2 = Map.ofEntries(Map.entry("index.analysis.filter.front_ngram.max_gram", "12"), Map.entry("index.analysis.filter.front_ngram.min_gram", "1"), Map.entry("index.analysis.filter.front_ngram.type", "edge_ngram"), Map.entry("index.analysis.filter.bigram_joiner.max_shingle_size", "2"), Map.entry("index.analysis.filter.bigram_joiner.output_unigrams", "false"), Map.entry("index.analysis.filter.bigram_joiner.token_separator", ""), Map.entry("index.analysis.filter.bigram_joiner.type", "shingle"), Map.entry("index.analysis.filter.bigram_max_size.max", "16"), Map.entry("index.analysis.filter.bigram_max_size.min", "0"), Map.entry("index.analysis.filter.bigram_max_size.type", "length"), Map.entry("index.analysis.filter.en-stem-filter.name", "light_english"), Map.entry("index.analysis.filter.en-stem-filter.type", "stemmer"), Map.entry("index.analysis.filter.bigram_joiner_unigrams.max_shingle_size", "2"), Map.entry("index.analysis.filter.bigram_joiner_unigrams.output_unigrams", "true"), Map.entry("index.analysis.filter.bigram_joiner_unigrams.token_separator", ""), Map.entry("index.analysis.filter.bigram_joiner_unigrams.type", "shingle"), Map.entry("index.analysis.filter.en-stop-words-filter.stopwords", "_english_"), Map.entry("index.analysis.filter.en-stop-words-filter.type", "stop"), Map.entry("index.analysis.analyzer.iq_text_delimiter.filter", List.of("delimiter", "cjk_width", "lowercase", "asciifolding", "en-stop-words-filter", "en-stem-filter")), Map.entry("index.analysis.analyzer.iq_text_delimiter.tokenizer", "whitespace"), Map.entry("index.analysis.analyzer.q_prefix.filter", List.of("cjk_width", "lowercase", "asciifolding")), Map.entry("index.analysis.analyzer.q_prefix.tokenizer", "standard"), Map.entry("index.analysis.analyzer.iq_text_base.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stop-words-filter")), Map.entry("index.analysis.analyzer.iq_text_base.tokenizer", "standard"), 
Map.entry("index.analysis.analyzer.iq_text_stem.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stop-words-filter", "en-stem-filter")), Map.entry("index.analysis.analyzer.iq_text_stem.tokenizer", "standard"), Map.entry("index.analysis.analyzer.i_text_bigram.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stem-filter", "bigram_joiner", "bigram_max_size")), Map.entry("index.analysis.analyzer.i_text_bigram.tokenizer", "standard"), Map.entry("index.analysis.analyzer.q_text_bigram.filter", List.of("cjk_width", "lowercase", "asciifolding", "en-stem-filter", "bigram_joiner_unigrams", "bigram_max_size")), Map.entry("index.analysis.analyzer.q_text_bigram.tokenizer", "standard"));
    // index_2: 5 shards / 1 replica, larger mapping limits (mostly — nested_fields is smaller than index_1's).
    Settings.Builder index2SettingsBuilder = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 5).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).put("index.mapping.total_fields.limit", 99999999).put("index.mapping.depth.limit", 30).put("index.mapping.nested_fields.limit", 40).put("index.mapping.nested_objects.limit", 20000).put("index.mapping.field_name_length.limit", 65536).put("index.mapping.dimension_fields.limit", 32);
    // INDEX_SIMILARITY: make index_2's default similarity ("boolean") conflict with index_1's ("bm25").
    index2SettingsBuilder = (expectedError == ExpectedError.INDEX_SIMILARITY) ? index2SettingsBuilder.put("index.similarity.default", "boolean") : index2SettingsBuilder.put("index.similarity.default", "bm25");
    index2SettingsBuilder.loadFromMap(analysisSettings2);
    Settings index2Settings = index2SettingsBuilder.build();
    // Capture the requests the code under test makes to read the source indices' settings,
    // mappings and field capabilities; each is answered with a canned response below.
    ArgumentCaptor<GetSettingsRequest> getSettingsRequestCaptor = ArgumentCaptor.forClass(GetSettingsRequest.class);
    ArgumentCaptor<GetMappingsRequest> getMappingsRequestCaptor = ArgumentCaptor.forClass(GetMappingsRequest.class);
    ArgumentCaptor<FieldCapabilitiesRequest> fieldCapabilitiesRequestCaptor = ArgumentCaptor.forClass(FieldCapabilitiesRequest.class);
    Map<String, Settings> indexToSettings = Map.of("index_1", index1Settings, "index_2", index2Settings);
    GetSettingsResponse getSettingsResponse = new GetSettingsResponse(indexToSettings, Map.of());
    doAnswer(callListenerOnResponse(getSettingsResponse)).when(client).execute(eq(GetSettingsAction.INSTANCE), getSettingsRequestCaptor.capture(), any());
    // Both source indices share identical mappings, including an alias to a top-level numerical
    // field and an alias into a nested object field.
    Map<String, Object> indexMappings = Map.of("properties", Map.of("field_1", "field_1_mappings", "field_2", "field_2_mappings", NUMERICAL_FIELD, Map.of("type", "integer"), OUTER_FIELD, Map.of("properties", Map.of(INNER_FIELD, Map.of("type", "integer"))), ALIAS_TO_NUMERICAL_FIELD, Map.of("type", "alias", "path", NUMERICAL_FIELD), ALIAS_TO_NESTED_FIELD, Map.of("type", "alias", "path", "outer-field.inner-field")));
    MappingMetadata index1MappingMetadata = new MappingMetadata("_doc", indexMappings);
    MappingMetadata index2MappingMetadata = new MappingMetadata("_doc", indexMappings);
    Map<String, MappingMetadata> mappings = Map.of("index_1", index1MappingMetadata, "index_2", index2MappingMetadata);
    GetMappingsResponse getMappingsResponse = new GetMappingsResponse(mappings);
    doAnswer(callListenerOnResponse(getMappingsResponse)).when(client).execute(eq(GetMappingsAction.INSTANCE), getMappingsRequestCaptor.capture(), any());
    // Field caps: every analysed field (including the aliases) reports as an "integer" capability.
    FieldCapabilitiesResponse fieldCapabilitiesResponse = new FieldCapabilitiesResponse(new String[0], new HashMap<>() {

        {
            put(NUMERICAL_FIELD, singletonMap("integer", createFieldCapabilities(NUMERICAL_FIELD, "integer")));
            put(OUTER_FIELD + "." + INNER_FIELD, singletonMap("integer", createFieldCapabilities(NUMERICAL_FIELD, "integer")));
            put(ALIAS_TO_NUMERICAL_FIELD, singletonMap("integer", createFieldCapabilities(NUMERICAL_FIELD, "integer")));
            put(ALIAS_TO_NESTED_FIELD, singletonMap("integer", createFieldCapabilities(NUMERICAL_FIELD, "integer")));
        }
    });
    doAnswer(callListenerOnResponse(fieldCapabilitiesResponse)).when(client).execute(eq(TransportFieldCapabilitiesAction.TYPE), fieldCapabilitiesRequestCaptor.capture(), any());
    // Build the regex the failure message must match for the injected conflict, if any.
    // The (bm25|boolean) / index_\d alternations exist because the merge may report the two
    // source indices in either order.
    String errorMessage = "";
    switch(expectedError) {
        case NONE:
            {
                break;
            }
        case INDEX_SIMILARITY:
            {
                errorMessage = "cannot merge settings because of differences for index\\.similarity; specified as " + "\\[\\{\"default\":\"(bm25|boolean)\"}] in index \\[index_\\d]; specified as " + "\\[\\{\"default\":\"(bm25|boolean)\"}] in index \\[index_\\d]";
                break;
            }
        case INDEX_ANALYSIS_FILTER:
            {
                errorMessage = "cannot merge settings because of differences for index\\.analysis\\.filter\\.en-stem-filter; specified as " + "\\[\\{\"name\":\"light_english\",\"type\":\"(stemmer|foobarbaz)\"}] in index \\[index_\\d]; specified as" + " \\[\\{\"name\":\"light_english\",\"type\":\"(stemmer|foobarbaz)\"}] in index \\[index_\\d]";
                break;
            }
        case INDEX_ANALYSIS_ANALYZER:
            {
                errorMessage = "cannot merge settings because of differences for " + "index\\.analysis\\.analyzer\\.iq_text_delimiter; specified as " + "\\[\\{\"filter\":\\[\"delimiter\",\"cjk_width\",\"lowercase\",(\"asciifolding\",)?" + "\"en-stop-words-filter\",\"en-stem-filter\"],\"tokenizer\":\"whitespace\"}] in index \\[index_\\d]; specified as " + "\\[\\{\"filter\":\\[\"delimiter\",\"cjk_width\",\"lowercase\",(\"asciifolding\",)?" + "\"en-stop-words-filter\",\"en-stem-filter\"],\"tokenizer\":\"whitespace\"}] in index \\[index_\\d]";
                break;
            }
        default:
            {
                // Guard against a new ExpectedError constant being added without a branch here.
                assertThat("Unexpected error case " + expectedError, Matchers.is(false));
                break;
            }
    }
    // Failure path: the listener must receive an exception whose message matches the regex;
    // nothing else is asserted and no mappings are returned.
    if (errorMessage.isEmpty() == false) {
        String finalErrorMessage = errorMessage;
        DestinationIndex.createDestinationIndex(client, clock, config, ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS, assertNoSuccessListener(e -> assertThat(e.getMessage(), Matchers.matchesRegex(finalErrorMessage))));
        return null;
    }
    // Happy path: run the creation, then assert on the captured requests.
    DestinationIndex.createDestinationIndex(client, clock, config, ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS, ActionTestUtils.assertNoFailureListener(response -> {
    }));
    GetSettingsRequest capturedGetSettingsRequest = getSettingsRequestCaptor.getValue();
    assertThat(capturedGetSettingsRequest.indices(), equalTo(SOURCE_INDEX));
    assertThat(capturedGetSettingsRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen()));
    assertThat(Arrays.asList(capturedGetSettingsRequest.names()), contains("index.number_of_shards", "index.number_of_replicas", "index.analysis.*", "index.similarity.*", "index.mapping.*"));
    assertThat(getMappingsRequestCaptor.getValue().indices(), equalTo(SOURCE_INDEX));
    CreateIndexRequest createIndexRequest = createIndexRequestCaptor.getValue();
    // The created index must carry the union of both source indices' settings keys.
    assertThat(createIndexRequest.settings().keySet(), containsInAnyOrder("index.number_of_shards", "index.number_of_replicas", "index.mapping.total_fields.limit", "index.mapping.depth.limit", "index.mapping.nested_fields.limit", "index.mapping.nested_objects.limit", "index.mapping.field_name_length.limit", "index.mapping.dimension_fields.limit", "index.similarity.default", "index.analysis.analyzer.i_prefix.filter", "index.analysis.analyzer.i_prefix.tokenizer", "index.analysis.analyzer.i_text_bigram.filter", "index.analysis.analyzer.i_text_bigram.tokenizer", "index.analysis.analyzer.iq_text_base.filter", "index.analysis.analyzer.iq_text_base.tokenizer", "index.analysis.analyzer.iq_text_delimiter.filter", "index.analysis.analyzer.iq_text_delimiter.tokenizer", "index.analysis.analyzer.iq_text_stem.filter", "index.analysis.analyzer.iq_text_stem.tokenizer", "index.analysis.analyzer.q_prefix.filter", "index.analysis.analyzer.q_prefix.tokenizer", "index.analysis.analyzer.q_text_bigram.filter", "index.analysis.analyzer.q_text_bigram.tokenizer", "index.analysis.filter.bigram_joiner.max_shingle_size", "index.analysis.filter.bigram_joiner.output_unigrams", "index.analysis.filter.bigram_joiner.token_separator", "index.analysis.filter.bigram_joiner.type", "index.analysis.filter.bigram_joiner_unigrams.max_shingle_size", "index.analysis.filter.bigram_joiner_unigrams.output_unigrams", "index.analysis.filter.bigram_joiner_unigrams.token_separator", "index.analysis.filter.bigram_joiner_unigrams.type", "index.analysis.filter.bigram_max_size.max", "index.analysis.filter.bigram_max_size.min", "index.analysis.filter.bigram_max_size.type", "index.analysis.filter.en-stem-filter.name", "index.analysis.filter.en-stem-filter.type", "index.analysis.filter.en-stop-words-filter.stopwords", "index.analysis.filter.en-stop-words-filter.type", "index.analysis.filter.front_ngram.max_gram", "index.analysis.filter.front_ngram.min_gram", "index.analysis.filter.front_ngram.type"));
    // Numeric limits: the expected values are the maximum of the two source indices' values
    // (e.g. nested_fields.limit 50 comes from index_1, nested_objects.limit 20000 from index_2)
    // — NOTE(review): this matches a max-merge strategy in DestinationIndex; confirm there.
    assertThat(createIndexRequest.settings().getAsInt("index.number_of_shards", -1), equalTo(5));
    assertThat(createIndexRequest.settings().getAsInt("index.number_of_replicas", -1), equalTo(1));
    assertThat(createIndexRequest.settings().getAsLong("index.mapping.total_fields.limit", -1L), equalTo(99999999L));
    assertThat(createIndexRequest.settings().getAsLong("index.mapping.depth.limit", -1L), equalTo(30L));
    assertThat(createIndexRequest.settings().getAsLong("index.mapping.nested_fields.limit", -1L), equalTo(50L));
    assertThat(createIndexRequest.settings().getAsLong("index.mapping.nested_objects.limit", -1L), equalTo(20000L));
    assertThat(createIndexRequest.settings().getAsLong("index.mapping.field_name_length.limit", -1L), equalTo(Long.MAX_VALUE));
    assertThat(createIndexRequest.settings().getAsLong("index.mapping.dimension_fields.limit", -1L), equalTo(32L));
    // Analysis settings: identical (or compatible) definitions from the two indices survive merging.
    assertThat(createIndexRequest.settings().getAsList("index.analysis.analyzer.i_prefix.filter"), equalTo(List.of("cjk_width", "lowercase", "asciifolding", "front_ngram")));
    assertThat(createIndexRequest.settings().get("index.analysis.analyzer.i_prefix.tokenizer"), equalTo("standard"));
    assertThat(createIndexRequest.settings().getAsList("index.analysis.analyzer.iq_text_delimiter.filter"), equalTo(List.of("delimiter", "cjk_width", "lowercase", "asciifolding", "en-stop-words-filter", "en-stem-filter")));
    assertThat(createIndexRequest.settings().get("index.analysis.analyzer.iq_text_delimiter.tokenizer"), equalTo("whitespace"));
    assertThat(createIndexRequest.settings().getAsList("index.analysis.analyzer.q_prefix.filter"), equalTo(List.of("cjk_width", "lowercase", "asciifolding")));
    assertThat(createIndexRequest.settings().get("index.analysis.analyzer.q_prefix.tokenizer"), equalTo("standard"));
    assertThat(createIndexRequest.settings().getAsList("index.analysis.analyzer.iq_text_base.filter"), equalTo(List.of("cjk_width", "lowercase", "asciifolding", "en-stop-words-filter")));
    assertThat(createIndexRequest.settings().get("index.analysis.analyzer.iq_text_base.tokenizer"), equalTo("standard"));
    assertThat(createIndexRequest.settings().getAsList("index.analysis.analyzer.iq_text_stem.filter"), equalTo(List.of("cjk_width", "lowercase", "asciifolding", "en-stop-words-filter", "en-stem-filter")));
    assertThat(createIndexRequest.settings().get("index.analysis.analyzer.iq_text_stem.tokenizer"), equalTo("standard"));
    assertThat(createIndexRequest.settings().getAsList("index.analysis.analyzer.i_text_bigram.filter"), equalTo(List.of("cjk_width", "lowercase", "asciifolding", "en-stem-filter", "bigram_joiner", "bigram_max_size")));
    assertThat(createIndexRequest.settings().get("index.analysis.analyzer.i_text_bigram.tokenizer"), equalTo("standard"));
    assertThat(createIndexRequest.settings().getAsList("index.analysis.analyzer.q_text_bigram.filter"), equalTo(List.of("cjk_width", "lowercase", "asciifolding", "en-stem-filter", "bigram_joiner_unigrams", "bigram_max_size")));
    assertThat(createIndexRequest.settings().get("index.analysis.analyzer.q_text_bigram.tokenizer"), equalTo("standard"));
    assertThat(createIndexRequest.settings().getAsInt("index.analysis.filter.bigram_joiner.max_shingle_size", -1), equalTo(2));
    assertThat(createIndexRequest.settings().getAsBoolean("index.analysis.filter.bigram_joiner.output_unigrams", true), equalTo(false));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.bigram_joiner.token_separator"), equalTo(""));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.bigram_joiner.type"), equalTo("shingle"));
    assertThat(createIndexRequest.settings().getAsInt("index.analysis.filter.bigram_joiner_unigrams.max_shingle_size", -1), equalTo(2));
    assertThat(createIndexRequest.settings().getAsBoolean("index.analysis.filter.bigram_joiner_unigrams.output_unigrams", false), equalTo(true));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.bigram_joiner_unigrams.token_separator"), equalTo(""));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.bigram_joiner_unigrams.type"), equalTo("shingle"));
    assertThat(createIndexRequest.settings().getAsInt("index.analysis.filter.bigram_max_size.max", -1), equalTo(16));
    assertThat(createIndexRequest.settings().getAsInt("index.analysis.filter.bigram_max_size.min", -1), equalTo(0));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.bigram_max_size.type"), equalTo("length"));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.en-stem-filter.name"), equalTo("light_english"));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.en-stem-filter.type"), equalTo("stemmer"));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.en-stop-words-filter.stopwords"), equalTo("_english_"));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.en-stop-words-filter.type"), equalTo("stop"));
    assertThat(createIndexRequest.settings().getAsInt("index.analysis.filter.front_ngram.max_gram", -1), equalTo(12));
    assertThat(createIndexRequest.settings().getAsInt("index.analysis.filter.front_ngram.min_gram", -1), equalTo(1));
    assertThat(createIndexRequest.settings().get("index.analysis.filter.front_ngram.type"), equalTo("edge_ngram"));
    // Mappings of the created index: source fields are carried over, the ml__incremental_id field
    // is added, and _meta records the analytics id / creation time / creator.
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, createIndexRequest.mappings())) {
        Map<String, Object> map = parser.map();
        assertThat(extractValue("_doc.properties.ml__incremental_id.type", map), equalTo("long"));
        assertThat(extractValue("_doc.properties.field_1", map), equalTo("field_1_mappings"));
        assertThat(extractValue("_doc.properties.field_2", map), equalTo("field_2_mappings"));
        assertThat(extractValue("_doc.properties.numerical-field.type", map), equalTo("integer"));
        assertThat(extractValue("_doc.properties.outer-field.properties.inner-field.type", map), equalTo("integer"));
        assertThat(extractValue("_doc.properties.alias-to-numerical-field.type", map), equalTo("alias"));
        assertThat(extractValue("_doc.properties.alias-to-nested-field.type", map), equalTo("alias"));
        assertThat(extractValue("_doc._meta.analytics", map), equalTo(ANALYTICS_ID));
        assertThat(extractValue("_doc._meta.creation_date_in_millis", map), equalTo(CURRENT_TIME_MILLIS));
        assertThat(extractValue("_doc._meta.created_by", map), equalTo(CREATED_BY));
        // Hand the parsed mappings back so callers can assert analysis-specific details.
        return map;
    }
}
146331.2160215hadoop
public String generateRealTimeTrackingMetrics() {
    double jvmFreeMemoryGB, jvmMaxMemoryGB, jvmTotalMemoryGB;
    if (jvmFreeMemoryGauge == null && metrics.getGauges().containsKey("variable.jvm.free.memory")) {
        jvmFreeMemoryGauge = metrics.getGauges().get("variable.jvm.free.memory");
    }
    if (jvmMaxMemoryGauge == null && metrics.getGauges().containsKey("variable.jvm.max.memory")) {
        jvmMaxMemoryGauge = metrics.getGauges().get("variable.jvm.max.memory");
    }
    if (jvmTotalMemoryGauge == null && metrics.getGauges().containsKey("variable.jvm.total.memory")) {
        jvmTotalMemoryGauge = metrics.getGauges().get("variable.jvm.total.memory");
    }
    jvmFreeMemoryGB = jvmFreeMemoryGauge == null ? 0 : Double.parseDouble(jvmFreeMemoryGauge.getValue().toString()) / 1024 / 1024 / 1024;
    jvmMaxMemoryGB = jvmMaxMemoryGauge == null ? 0 : Double.parseDouble(jvmMaxMemoryGauge.getValue().toString()) / 1024 / 1024 / 1024;
    jvmTotalMemoryGB = jvmTotalMemoryGauge == null ? 0 : Double.parseDouble(jvmTotalMemoryGauge.getValue().toString()) / 1024 / 1024 / 1024;
    String numRunningApps, numRunningContainers;
    if (numRunningAppsGauge == null && metrics.getGauges().containsKey("variable.running.application")) {
        numRunningAppsGauge = metrics.getGauges().get("variable.running.application");
    }
    if (numRunningContainersGauge == null && metrics.getGauges().containsKey("variable.running.container")) {
        numRunningContainersGauge = metrics.getGauges().get("variable.running.container");
    }
    numRunningApps = numRunningAppsGauge == null ? "0" : numRunningAppsGauge.getValue().toString();
    numRunningContainers = numRunningContainersGauge == null ? "0" : numRunningContainersGauge.getValue().toString();
    double allocatedMemoryGB, allocatedVCoresGB, availableMemoryGB, availableVCoresGB;
    if (allocatedMemoryGauge == null && metrics.getGauges().containsKey("variable.cluster.allocated.memory")) {
        allocatedMemoryGauge = metrics.getGauges().get("variable.cluster.allocated.memory");
    }
    if (allocatedVCoresGauge == null && metrics.getGauges().containsKey("variable.cluster.allocated.vcores")) {
        allocatedVCoresGauge = metrics.getGauges().get("variable.cluster.allocated.vcores");
    }
    if (availableMemoryGauge == null && metrics.getGauges().containsKey("variable.cluster.available.memory")) {
        availableMemoryGauge = metrics.getGauges().get("variable.cluster.available.memory");
    }
    if (availableVCoresGauge == null && metrics.getGauges().containsKey("variable.cluster.available.vcores")) {
        availableVCoresGauge = metrics.getGauges().get("variable.cluster.available.vcores");
    }
    allocatedMemoryGB = allocatedMemoryGauge == null ? 0 : Double.parseDouble(allocatedMemoryGauge.getValue().toString()) / 1024;
    allocatedVCoresGB = allocatedVCoresGauge == null ? 0 : Double.parseDouble(allocatedVCoresGauge.getValue().toString());
    availableMemoryGB = availableMemoryGauge == null ? 0 : Double.parseDouble(availableMemoryGauge.getValue().toString()) / 1024;
    availableVCoresGB = availableVCoresGauge == null ? 0 : Double.parseDouble(availableVCoresGauge.getValue().toString());
    double allocateTimecost, commitSuccessTimecost, commitFailureTimecost, handleTimecost;
    if (allocateTimecostHistogram == null && metrics.getHistograms().containsKey("sampler.scheduler.operation.allocate.timecost")) {
        allocateTimecostHistogram = metrics.getHistograms().get("sampler.scheduler.operation.allocate.timecost");
    }
    if (commitSuccessTimecostHistogram == null && metrics.getHistograms().containsKey("sampler.scheduler.operation.commit.success.timecost")) {
        commitSuccessTimecostHistogram = metrics.getHistograms().get("sampler.scheduler.operation.commit.success.timecost");
    }
    if (commitFailureTimecostHistogram == null && metrics.getHistograms().containsKey("sampler.scheduler.operation.commit.failure.timecost")) {
        commitFailureTimecostHistogram = metrics.getHistograms().get("sampler.scheduler.operation.commit.failure.timecost");
    }
    if (handleTimecostHistogram == null && metrics.getHistograms().containsKey("sampler.scheduler.operation.handle.timecost")) {
        handleTimecostHistogram = metrics.getHistograms().get("sampler.scheduler.operation.handle.timecost");
    }
    allocateTimecost = allocateTimecostHistogram == null ? 0.0 : allocateTimecostHistogram.getSnapshot().getMean() / 1000000;
    commitSuccessTimecost = commitSuccessTimecostHistogram == null ? 0.0 : commitSuccessTimecostHistogram.getSnapshot().getMean() / 1000000;
    commitFailureTimecost = commitFailureTimecostHistogram == null ? 0.0 : commitFailureTimecostHistogram.getSnapshot().getMean() / 1000000;
    handleTimecost = handleTimecostHistogram == null ? 0.0 : handleTimecostHistogram.getSnapshot().getMean() / 1000000;
    Map<SchedulerEventType, Double> handleOperTimecostMap = new HashMap<SchedulerEventType, Double>();
    for (SchedulerEventType e : SchedulerEventType.values()) {
        String key = "sampler.scheduler.operation.handle." + e + ".timecost";
        if (!handleOperTimecostHistogramMap.containsKey(e) && metrics.getHistograms().containsKey(key)) {
            handleOperTimecostHistogramMap.put(e, metrics.getHistograms().get(key));
        }
        double timecost = handleOperTimecostHistogramMap.containsKey(e) ? handleOperTimecostHistogramMap.get(e).getSnapshot().getMean() / 1000000 : 0;
        handleOperTimecostMap.put(e, timecost);
    }
    Map<String, Double> queueAllocatedMemoryMap = new HashMap<String, Double>();
    Map<String, Long> queueAllocatedVCoresMap = new HashMap<String, Long>();
    for (String queue : wrapper.getTracker().getQueueSet()) {
        String key = "counter.queue." + queue + ".allocated.memory";
        if (!queueAllocatedMemoryCounterMap.containsKey(queue) && metrics.getCounters().containsKey(key)) {
            queueAllocatedMemoryCounterMap.put(queue, metrics.getCounters().get(key));
        }
        double queueAllocatedMemoryGB = queueAllocatedMemoryCounterMap.containsKey(queue) ? queueAllocatedMemoryCounterMap.get(queue).getCount() / 1024.0 : 0;
        queueAllocatedMemoryMap.put(queue, queueAllocatedMemoryGB);
        key = "counter.queue." + queue + ".allocated.cores";
        if (!queueAllocatedVCoresCounterMap.containsKey(queue) && metrics.getCounters().containsKey(key)) {
            queueAllocatedVCoresCounterMap.put(queue, metrics.getCounters().get(key));
        }
        long queueAllocatedVCores = queueAllocatedVCoresCounterMap.containsKey(queue) ? queueAllocatedVCoresCounterMap.get(queue).getCount() : 0;
        queueAllocatedVCoresMap.put(queue, queueAllocatedVCores);
    }
    if (schedulerCommitSuccessCounter == null && metrics.getCounters().containsKey("counter.scheduler.operation.commit.success")) {
        schedulerCommitSuccessCounter = metrics.getCounters().get("counter.scheduler.operation.commit.success");
    }
    if (schedulerCommitFailureCounter == null && metrics.getCounters().containsKey("counter.scheduler.operation.commit.failure")) {
        schedulerCommitFailureCounter = metrics.getCounters().get("counter.scheduler.operation.commit.failure");
    }
    long schedulerCommitSuccessThroughput = 0;
    long schedulerCommitFailureThroughput = 0;
    if (schedulerCommitSuccessCounter != null && schedulerCommitFailureCounter != null) {
        long currentTrackingTime = System.currentTimeMillis();
        long currentSchedulerCommitSucessCount = schedulerCommitSuccessCounter.getCount();
        long currentSchedulerCommitFailureCount = schedulerCommitFailureCounter.getCount();
        if (lastTrackingTime != null) {
            double intervalSeconds = (double) (currentTrackingTime - lastTrackingTime) / 1000;
            schedulerCommitSuccessThroughput = Math.round((currentSchedulerCommitSucessCount - lastSchedulerCommitSuccessCount) / intervalSeconds);
            schedulerCommitFailureThroughput = Math.round((currentSchedulerCommitFailureCount - lastSchedulerCommitFailureCount) / intervalSeconds);
        }
        lastTrackingTime = currentTrackingTime;
        lastSchedulerCommitSuccessCount = currentSchedulerCommitSucessCount;
        lastSchedulerCommitFailureCount = currentSchedulerCommitFailureCount;
    }
    StringBuilder sb = new StringBuilder();
    sb.append("{");
    sb.append("\"time\":").append(System.currentTimeMillis()).append(",\"jvm.free.memory\":").append(jvmFreeMemoryGB).append(",\"jvm.max.memory\":").append(jvmMaxMemoryGB).append(",\"jvm.total.memory\":").append(jvmTotalMemoryGB).append(",\"running.applications\":").append(numRunningApps).append(",\"running.containers\":").append(numRunningContainers).append(",\"cluster.allocated.memory\":").append(allocatedMemoryGB).append(",\"cluster.allocated.vcores\":").append(allocatedVCoresGB).append(",\"cluster.available.memory\":").append(availableMemoryGB).append(",\"cluster.available.vcores\":").append(availableVCoresGB);
    for (String queue : wrapper.getTracker().getQueueSet()) {
        sb.append(",\"queue.").append(queue).append(".allocated.memory\":").append(queueAllocatedMemoryMap.get(queue));
        sb.append(",\"queue.").append(queue).append(".allocated.vcores\":").append(queueAllocatedVCoresMap.get(queue));
    }
    sb.append(",\"scheduler.allocate.timecost\":").append(allocateTimecost);
    sb.append(",\"scheduler.commit.success.timecost\":").append(commitSuccessTimecost);
    sb.append(",\"scheduler.commit.failure.timecost\":").append(commitFailureTimecost);
    sb.append(",\"scheduler.commit.success.throughput\":").append(schedulerCommitSuccessThroughput);
    sb.append(",\"scheduler.commit.failure.throughput\":").append(schedulerCommitFailureThroughput);
    sb.append(",\"scheduler.handle.timecost\":").append(handleTimecost);
    for (SchedulerEventType e : SchedulerEventType.values()) {
        sb.append(",\"scheduler.handle-").append(e).append(".timecost\":").append(handleOperTimecostMap.get(e));
    }
    sb.append(generateNodeUsageMetrics("memory"));
    sb.append(generateNodeUsageMetrics("vcores"));
    sb.append("}");
    return sb.toString();
}
1420748.025316hadoop
/**
 * Generates a pseudo-random, English-looking word of exactly {@code wordLength}
 * characters by chaining letter bi-grams.
 *
 * <p>The word is seeded with a random entry from {@code startBiGram} and then
 * grown one step at a time via {@code addCharacter}, which consults
 * {@code lookupBiGram} / {@code nextCharLookup} to pick the next letter.
 *
 * <p>Stall handling: {@code count} tracks consecutive iterations in which
 * {@code addCharacter} returned the word unchanged. After 5 stalls {@code flag}
 * is switched to 1 (presumably telling {@code addCharacter} to relax its
 * lookup — TODO confirm against addCharacter's implementation); after 20
 * stalls the word is discarded and re-seeded from scratch.
 *
 * <p>NOTE(review): the loop exits only when the length is exactly
 * {@code wordLength}; seed bi-grams are two characters, so callers should pass
 * {@code wordLength >= 2} or this may never terminate — confirm call sites.
 *
 * @param wordLength desired word length, in characters
 * @return a randomly generated word of exactly {@code wordLength} characters
 */
private static String generateRandomWord(int wordLength) {
    String randomWord;
    // Bi-grams that may start a word.
    String[] startBiGram = { "TH", "OF", "AN", "IN", "TO", "CO", "BE", "HE", "RE", "HA", "WA", "FO", "WH", "MA", "WI", "ON", "HI", "PR", "ST", "NO", "IS", "IT", "SE", "WE", "AS", "CA", "DE", "SO", "MO", "SH", "DI", "AL", "AR", "LI", "WO", "FR", "PA", "ME", "AT", "SU", "BU", "SA", "FI", "NE", "CH", "PO", "HO", "DO", "OR", "UN", "LO", "EX", "BY", "FA", "LA", "LE", "PE", "MI", "SI", "YO", "TR", "BA", "GO", "BO", "GR", "TE", "EN", "OU", "RA", "AC", "FE", "PL", "CL", "SP", "BR", "EV", "TA", "DA", "AB", "TI", "RO", "MU", "EA", "NA", "SC", "AD", "GE", "YE", "AF", "AG", "UP", "AP", "DR", "US", "PU", "CE", "IF", "RI", "VI", "IM", "AM", "KN", "OP", "CR", "OT", "JU", "QU", "TW", "GA", "VA", "VE", "PI", "GI", "BI", "FL", "BL", "EL", "JO", "FU", "HU", "CU", "RU", "OV", "MY", "OB", "KE", "EF", "PH", "CI", "KI", "NI", "SL", "EM", "SM", "VO", "MR", "WR", "ES", "DU", "TU", "AU", "NU", "GU", "OW", "SY", "JA", "OC", "EC", "ED", "ID", "JE", "AI", "EI", "SK", "OL", "GL", "EQ", "LU", "AV", "SW", "AW", "EY", "TY" };
    // Bi-grams used (by addCharacter, presumably positionally) to index into
    // nextCharLookup below.
    // NOTE(review): a few entries carry a leading space (" OR", " LA", " KE",
    // " LD") — looks like a data-entry artifact; confirm how addCharacter
    // matches these before "fixing" them.
    String[] lookupBiGram = { "TH", "AN", "IN", "IO", "EN", "TI", "FO", "HE", "HA", "HI", "TE", "AT", "ER", "AL", "WA", "VE", "CO", "RE", "IT", "WI", "ME", "NC", "ON", "PR", "AR", "ES", "EV", "ST", "EA", "IV", "EC", "NO", "OU", "PE", "IL", "IS", "MA", "AV", "OM", "IC", "GH", "DE", "AI", "CT", "IG", "ID", " OR", "OV", "UL", "YO", "BU", "RA", "FR", "RO", "WH", "OT", "BL", "NT", "UN", "TR", "HO", "AC", "TU", "WE", "CA", "SH", "UR", "IE", "PA", "TO", "EE", "LI", "RI", "UG", "AM", "ND", "US", "LL", "AS", "TA", "LE", "MO", "WO", "MI", "AB", "EL", "IA", "NA", "SS", "AG", "TT", "NE", "PL", " LA", "OS", "CE", "DI", "BE", "AP", "SI", "NI", "OW", "SO", "AK", "CH", "EM", "IM", "SE", "NS", "PO", "EI", "EX", "KI", "UC", "AD", "GR", "IR", "NG", "OP", "SP", "OL", "DA", "NL", "TL", "LO", "BO", "RS", "FE", "FI", "SU", "GE", "MP", "UA", "OO", "RT", "SA", "CR", "FF", "IK", "MB", " KE", "FA", "CI", "EQ", "AF", "ET", "AY", "MU", "UE", "HR", "TW", "GI", "OI", "VI", "CU", "FU", "ED", "QU", "UT", "RC", "OF", "CL", "FT", "IZ", "PP", "RG", "DU", "RM", "YE", "RL", "DO", "AU", "EP", "BA", "JU", "RD", "RU", "OG", "BR", "EF", "KN", "LS", "GA", "PI", "YI", "BI", "IB", "UB", "VA", "OC", "IF", "RN", "RR", "SC", "TC", "CK", "DG", "DR", "MM", "NN", "OD", "RV", "UD", "XP", "JE", "UM", "EG", "DL", "PH", "SL", "GO", "CC", "LU", "OA", "PU", "UI", "YS", "ZA", "HU", "MR", "OE", "SY", "EO", "TY", "UP", "FL", "LM", "NF", "RP", "OH", "NU", "XA", "OB", "VO", "DM", "GN", " LD", "PT", "SK", "WR", "JO", "LT", "YT", "UF", "BJ", "DD", "EY", "GG", "GL", "GU", "HT", "LV", "MS", "NM", "NV", "OK", "PM", "RK", "SW", "TM", "XC", "ZE", "AW", "SM" };
    // Per lookupBiGram entry, two candidate-character lists. From the data
    // shape, [0] appears to be the general next-character candidates and [1]
    // a restricted subset (an empty [1] meaning "no candidate" in that mode).
    // NOTE(review): that [0]/[1] interpretation is inferred — verify against
    // addCharacter before relying on it.
    String[][][] nextCharLookup = { { { "E", "A", "I", "O", "R" }, { "E", "O" } }, { { "D", "T", "Y", "C", "S", "G", "N", "I", "O", "E", "A", "K" }, { "D", "T", "Y", "S", "G", "O", "E", "A", "K" } }, { { "G", "T", "E", "D", "S", "C", "A", "I", "K", "V", "U", "N", "F" }, { "G", "T", "E", "D", "S", "A", "K" } }, { { "N", "U", "R" }, { "N", "U", "R" } }, { { "T", "C", "D", "S", "E", "I", "G", "O", "N", "A" }, { "T", "D", "S", "E", "G", "O", "A" } }, { { "O", "N", "C", "V", "M", "L", "E", "T", "S", "A", "R", "F" }, { "N", "C", "M", "L", "E", "T", "S", "A", "R", "F" } }, { { "R", "U", "O", "L" }, { "R", "U", "O", "L" } }, { { "R", "N", "Y", "S", "M", "I", "A", "L", "D", "T" }, { "R", "N", "Y", "S", "M", "A", "L", "D", "T" } }, { { "T", "D", "V", "N", "S", "R", "P", "L" }, { "T", "D", "N", "S", "R", "L" } }, { { "S", "N", "C", "M", "L", "P", "G", "T", "R", "E" }, { "S", "N", "C", "M", "L", "P", "G", "T", "R", "E" } }, { { "R", "D", "N", "S", "M", "L", "E", "C", "A" }, { "R", "D", "N", "S", "M", "L", "E", "A" } }, { { "I", "E", "T", "H", "U", "O", "C" }, { "E", "H", "O" } }, { { "E", "S", "I", "A", "N", "Y", "T", "V", "M", "R", "O", "L", "G", "F", "C" }, { "E", "S", "A", "N", "Y", "T", "M" } }, { { "L", "S", "I", "T", "E", "U", "O", "M", "K", "F", "A" }, { "L", "S", "T", "E", "F" } }, { { "S", "Y", "R", "T", "N", "L" }, { "S", "Y", "R", "T", "N", "L" } }, { { "R", "N", "L", "S", "D" }, { "R", "N", "L", "S", "D" } }, { { "N", "M", "U", "R", "L", "V", "S", "O" }, { "N", "M", "U", "R", "L", "O" } }, { { "S", "A", "D", "N", "E", "C", "L", "T", "P", "M", "V", "G", "F", "Q" }, { "S", "A", "D", "N", "E", "L", "T", "P", "M" } }, { { "H", "I", "Y", "E", "S", "T", "A", "U" }, { "H", "Y", "E", "S", "A" } }, { { "T", "L", "N", "S" }, { "T", "L", "N", "S" } }, { { "N", "R", "D", "T", "S", "M", "A" }, { "N", "R", "D", "T", "S", "M", "A" } }, { { "E", "I", "H", "T", "R", "O", "L" }, { "E", "H", "T" } }, { { "S", "E", "T", "G", "A", "D", "L", "C", "V", "O", "I", "F" }, { "S", "E", "T", 
"G", "A", "D", "O" } }, { { "O", "E", "I", "A" }, { "E", "A" } }, { { "E", "T", "D", "Y", "S", "I", "R", "L", "M", "K", "G", "A", "O", "N", "C" }, { "E", "T", "D", "Y", "S", "M", "K", "A", "N" } }, { { "S", "T", "E", "I", "P", "U", "C" }, { "S", "T", "E" } }, { { "E", "I" }, { "E" } }, { { "A", "R", "I", "E", "O", "U", "S" }, { "A", "E", "O", "S" } }, { { "R", "S", "T", "D", "L", "C", "N", "V", "M", "K" }, { "R", "S", "T", "D", "L", "N", "M" } }, { { "E", "I", "A" }, { "E" } }, { { "T", "O", "I", "E", "A", "U", "R", "H" }, { "T", "E", "H" } }, { { "T", "W", "R", "U", "N", "M" }, { "T", "W", "R", "U", "N", "M" } }, { { "T", "L", "R", "N", "S", "G", "P", "B" }, { "T", "L", "R", "N", "S", "P" } }, { { "R", "N", "C", "A", "D", "T", "O" }, { "R", "N", "A", "D", "T" } }, { { "L", "E", "I", "Y", "D", "A" }, { "L", "E", "Y", "D" } }, { { "T", "H", "S", "I", "E", "C", "M" }, { "T", "H", "S", "E", "M" } }, { { "N", "T", "L", "K", "D", "S", "I", "G" }, { "N", "T", "L", "D", "S" } }, { { "E", "I", "A" }, { "E" } }, { { "E", "P", "M", "I", "A" }, { "E" } }, { { "A", "H", "E", "I", "T", "K", "U", "S" }, { "H", "E", "T", "K", "S" } }, { { "T" }, { "T" } }, { { "R", "N", "S", "D", "A", "V", "P", "T", "M", "L", "F" }, { "R", "N", "S", "D", "A", "P", "T", "M", "L" } }, { { "N", "D", "R", "L", "T" }, { "N", "D", "R", "L", "T" } }, { { "I", "E", "U", "S", "O" }, { "E", "S", "O" } }, { { "H", "N", "I" }, { "H", "N" } }, { { "E" }, { "E" } }, { { "E", "T", "M", "D", "S", "K", "I", "Y", "L", "G", "A", "R", "N", "C" }, { "E", "T", "M", "D", "S", "K", "Y", "A", "N" } }, { { "E", "I" }, { "E" } }, { { "D", "T", "A", "L" }, { "D", "T", "L" } }, { { "U" }, { "U" } }, { { "T", "S", "R", "I" }, { "T", "S", "R" } }, { { "T", "N", "L", "C", "I", "M", "D", "S", "R", "P", "G", "B" }, { "T", "N", "L", "M", "D", "S", "R" } }, { { "O", "E", "A" }, { "E", "A" } }, { { "M", "U", "V", "P", "N", "W", "S", "O", "L", "D", "C", "B", "A", "T", "G" }, { "M", "U", "P", "N", "W", "O", "L", "D", "T" } }, { { "I", 
"E", "O", "A" }, { "E", "O" } }, { { "H", "E", "T", "I" }, { "H", "E" } }, { { "E", "I", "Y", "O", "A" }, { "E", "Y" } }, { { "E", "I", "S", "R", "O", "A", "L", "Y", "U", "H" }, { "E", "S", "O", "A", "Y", "H" } }, { { "D", "T", "I", "C", "G" }, { "D", "T", "G" } }, { { "A", "I", "O", "E", "U", "Y" }, { "A", "E", "Y" } }, { { "U", "W", "S", "R", "L", "O", "M", "T", "P", "N", "D" }, { "U", "W", "R", "L", "O", "M", "T", "P", "N", "D" } }, { { "T", "E", "K", "H", "C", "R", "I" }, { "T", "E", "K", "H" } }, { { "R", "D", "A", "T" }, { "R", "T" } }, { { "R", "L", "E", "V", "S", "N", "A" }, { "R", "L", "E", "S", "N", "A" } }, { { "L", "N", "T", "R", "U", "S", "M", "P" }, { "L", "N", "T", "R", "S", "M" } }, { { "E", "O", "I", "A" }, { "E", "O" } }, { { "E", "N", "T", "S", "I", "A", "Y", "R", "P", "C" }, { "E", "N", "T", "S", "A", "Y" } }, { { "S", "N", "D", "T", "W", "V", "R", "L", "F" }, { "S", "N", "D", "T", "W", "R", "L" } }, { { "R", "T", "S", "N", "L", "I", "C" }, { "R", "T", "S", "N", "L" } }, { { "R", "O", "N", "W", "P", "M", "L" }, { "R", "O", "N", "W", "P", "M", "L" } }, { { "N", "D", "T", "M", "S", "R", "P", "L", "K" }, { "N", "D", "T", "M", "S", "R", "P", "L", "K" } }, { { "N", "T", "S", "C", "K", "G", "E", "F", "Z", "V", "O", "M", "A" }, { "N", "T", "S", "C", "G", "E", "F", "M", "A" } }, { { "N", "E", "C", "T", "S", "G", "A", "V", "O", "P", "M", "L", "D", "B" }, { "N", "E", "C", "T", "S", "G", "A", "P", "M", "L", "D" } }, { { "H", "G" }, { "H" } }, { { "E", "P", "I", "O", "A" }, { "E" } }, { { "E", "I", "S", "A", "U", "O" }, { "E", "S", "O" } }, { { "E", "T", "I", "S", "L", "H" }, { "E", "T", "S", "H" } }, { { "Y", "E", "O", "I", "S", "A" }, { "Y", "E", "S" } }, { { "T", "S", "E", "I", "U", "O", "K", "H" }, { "T", "S", "E", "O", "H" } }, { { "T", "N", "L", "I", "R", "K", "B", "G", "C" }, { "T", "N", "L", "R" } }, { { "S", "D", "A", "T", "C", "R", "N", "M", "G", "V", "F" }, { "S", "D", "A", "T", "R", "N", "M" } }, { { "R", "S", "V", "T", "U", "D" }, { "R", "T", 
"U", "D" } }, { { "R", "U" }, { "R", "U" } }, { { "N", "L", "S", "T", "C", "G" }, { "N", "L", "S", "T", "C", "G" } }, { { "L", "O", "I" }, {} }, { { "L", "Y", "I", "E", "F", "O", "A", "T", "S", "P", "D" }, { "L", "Y", "E", "F", "T", "S", "D" } }, { { "L", "N", "T" }, { "L", "N", "T" } }, { { "L", "T", "R", "N", "M" }, { "L", "T", "R", "N", "M" } }, { { "I", "E", "U", "O", "A" }, { "E", "O" } }, { { "E", "A", "O" }, { "E", "O" } }, { { "E", "L", "I" }, { "E" } }, { { "D", "S", "W", "R", "E", "Y", "V", "T", "L", "C", "A" }, { "D", "S", "W", "R", "E", "Y", "T", "L", "A" } }, { { "A", "E", "I", "Y", "O" }, { "E", "Y" } }, { { "T", "N", "R", "S", "C", "Y", "W", "I", "B" }, { "T", "N", "R", "S", "Y", "W" } }, { { "T", "E", "S", "I" }, { "T", "E", "S" } }, { { "S", "N", "R", "D", "P", "L", "I" }, { "S", "N", "R", "D", "P", "L" } }, { { "S", "N", "T", "D", "F", "E", "C", "A", "V", "R" }, { "S", "N", "T", "D", "F", "E", "C", "A", "R" } }, { { "R", "E", "C", "T", "L", "F", "S", "I", "G", "D", "A" }, { "R", "E", "T", "L", "S", "D", "A" } }, { { "P", "E", "A" }, { "E" } }, { { "O", "N", "D", "T", "S", "G", "C", "B", "V", "M", "A" }, { "N", "D", "T", "S", "G", "C", "M", "A" } }, { { "N", "T", "S", "C", "Z", "O", "G", "F" }, { "N", "T", "S", "C", "G", "F" } }, { { "N", "E", "S", "I", "A" }, { "N", "E", "S" } }, { { "N", "M", "U", "L", "C", "R" }, { "N", "M", "U", "L", "R" } }, { { "E", "I" }, { "E" } }, { { "E", "A", "I", "O", "U", "R" }, { "E", "O" } }, { { "E", "S", "P", "O", "B", "A", "I" }, { "E", "S" } }, { { "E", "P", "I", "A", "S", "M" }, { "E", "S" } }, { { "D", "N", "L", "S", "R", "E", "C", "T", "V", "A" }, { "D", "N", "L", "S", "R", "E", "T", "A" } }, { { "T", "I", "E" }, { "T", "E" } }, { { "S", "R", "N", "L", "W", "T", "I" }, { "R", "N", "L", "W", "T" } }, { { "R", "N", "G", "T" }, { "R", "N", "G", "T" } }, { { "P", "T", "I", "C", "A" }, { "T" } }, { { "N" }, { "N" } }, { { "H", "T", "K", "E" }, { "H", "T", "K", "E" } }, { { "E", "I", "Y", "V", "M", "D" }, { "E", "Y" 
} }, { { "E", "A", "O" }, { "E", "A" } }, { { "E", "S", "T", "L", "I" }, { "E", "S", "T" } }, { { "E", "S", "L", "T", "R", "I" }, { "E", "S" } }, { { "E", "P", "L" }, { "E" } }, { { "E", "O", "I", "A" }, { "E" } }, { { "D", "L", "I", "O", "E", "U" }, { "D", "L", "E" } }, { { "Y", "T", "R", "N" }, { "Y", "T", "R", "N" } }, { { "Y" }, { "Y" } }, { { "Y", "E" }, { "Y", "E" } }, { { "W", "N", "O", "S", "C", "V", "U", "T", "R", "P", "G" }, { "W", "N", "O", "U", "T", "R", "P" } }, { { "U", "T", "R", "O", "D", "A" }, { "U", "T", "R", "O", "D" } }, { { "T", "E", "O", "I" }, { "T", "E", "O" } }, { { "R", "E", "W", "L", "C", "A" }, { "R", "E", "W", "L", "A" } }, { { "R", "N", "C", "E", "L", "G" }, { "R", "N", "C", "E", "L", "G" } }, { { "R", "C", "P", "B", "M", "L", "A" }, { "R", "P", "M", "L" } }, { { "N", "T", "S", "R", "D" }, { "N", "T", "S", "R", "D" } }, { { "L", "O", "A", "T", "R", "E" }, { "T", "E" } }, { { "L", "T", "R" }, { "L", "T", "R" } }, { { "K", "D", "L", "T", "R", "N", "M" }, { "K", "D", "L", "T", "R", "N", "M" } }, { { "I", "H", "A", "E", "Y", "U", "S" }, { "H", "A", "E", "Y", "S" } }, { { "I", "M", "Y", "N", "L" }, { "M", "Y", "N", "L" } }, { { "E", "I", "O", "A" }, { "E", "A" } }, { { "E", "I" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "D", "N", "T", "S", "R", "E" }, { "D", "N", "T", "S", "R", "E" } }, { { "C", "R", "M", "I" }, { "R", "M" } }, { { "A", "T", "E", "S", "P", "N" }, { "A", "T", "E", "S", "P", "N" } }, { { "U" }, {} }, { { "T", "F" }, { "T", "F" } }, { { "T", "I", "H", "E", "Y", "W", "S", "A" }, { "H", "E", "Y", "S", "A" } }, { { "S", "E" }, { "S" } }, { { "S", "N", "L", "C" }, { "S", "N", "L" } }, { { "S", "N" }, { "S", "N" } }, { { "O", "E", "I" }, { "E" } }, { { "O", "E" }, { "O", "E" } }, { { "N", "V", "O", "C" }, { "N", "C" } }, { { "N" }, { "N" } }, { { "N", "D", "S", "C", "T", "O", "L", "E" }, { "N", "D", "S", "C", "T", "L", "E" } }, { { "L", "R", "T", "S" }, { "L", "R", "T", "S" } }, { { "L", "R", "N" }, { "L", "R", 
"N" } }, { { "I", "U", "E" }, { "E" } }, { { "I", "E", "A" }, { "E" } }, { { "I", "H", "E" }, { "H", "E" } }, { { "H", "E" }, { "H", "E" } }, { { "F", "T" }, { "F", "T" } }, { { "E", "A", "U", "O" }, { "E" } }, { { "E" }, { "E" } }, { { "E", "A" }, { "E" } }, { { "E", "O", "R", "L" }, { "E" } }, { { "E", "A" }, { "E" } }, { { "C", "S", "R", "A" }, { "S", "R" } }, { { "A", "S", "I", "E" }, { "S", "E" } }, { { "A", "S", "D" }, { "A", "S", "D" } }, { { "Y", "D" }, { "Y", "D" } }, { { "W", "N", "M", "E" }, { "W", "N", "M" } }, { { "T", "S" }, { "T", "S" } }, { { "T", "O", "E", "A" }, { "T", "E" } }, { { "S", "C", "R", "N", "L" }, { "S", "R", "N", "L" } }, { { "S" }, { "S" } }, { { "S", "E", "I" }, { "S", "E" } }, { { "S", "N", "C" }, { "S", "N" } }, { { "R", "I" }, {} }, { { "O", "I", "E", "A" }, { "E", "A" } }, { { "O", "F", "U", "T", "E" }, { "F", "T", "E" } }, { { "O", "E" }, { "O", "E" } }, { { "O" }, { "O" } }, { { "N", "I", "T", "R" }, { "N", "T", "R" } }, { { "N", "T", "R", "E", "C" }, { "N", "T", "R", "E", "C" } }, { { "N" }, { "N" } }, { { "L", "T", "N" }, { "L", "T", "N" } }, { { "L", "I", "E" }, { "E" } }, { { "L" }, {} }, { { "L", "T", "R", "N" }, { "L", "T", "R", "N" } }, { { "K", "I", "E", "C", "A" }, { "K", "E" } }, { { "I", "F", "E", "T" }, { "F", "E", "T" } }, { { "I", "E", "M", "A" }, { "E", "A" } }, { { "I", "E", "Y", "O" }, { "E", "Y" } }, { { "H", "R", "O", "I", "A" }, { "H" } }, { { "H" }, { "H" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E", "O", "I", "A" }, { "E", "A" } }, { { "E", "U", "I" }, { "E" } }, { { "E", "O", "I" }, { "E", "O" } }, { { "E", "Y", "U" }, { "E", "Y" } }, { { "E", "I" }, { "E" } }, { { "E", "I" }, { "E" } }, { { "E" }, { "E" } }, { { "C" }, {} }, { { "B", "E" }, { "E" } }, { { "A", "R", "I", "E" }, { "E" } }, { { "Y", "E" }, { "Y", "E" } }, { { "Y", "O", "I", "E" }, { "Y", "O", "E" } }, { { "Y", "A" }, { "Y" } }, { { "V", "T", "O" }, { "T", "O" } }, { { "U", "O", "E" }, { "E" } }, { { "T", "S", "M", "E", "D" }, { 
"T", "S", "M", "E" } }, { { "T", "R", "D" }, { "T", "R", "D" } }, { { "T", "R", "L", "B" }, { "T", "R", "L" } }, { { "T", "R", "L" }, { "T", "R", "L" } }, { { "T" }, { "T" } }, { { "T" }, { "T" } }, { { "S", "R", "N", "M" }, { "S", "R", "N", "M" } }, { { "S" }, { "S" } }, { { "S" }, { "S" } }, { { "S" }, { "S" } }, { { "R", "P" }, { "R", "P" } }, { { "P" }, {} }, { { "P", "O" }, {} }, { { "O", "E" }, { "E" } }, { { "O" }, {} }, { { "O" }, {} }, { { "O" }, {} }, { { "N" }, {} }, { { "M" }, { "M" } }, { { "M" }, { "M" } }, { { "L" }, {} }, { { "L" }, { "L" } }, { { "I" }, {} }, { { "I" }, {} }, { { "I", "E" }, { "E" } }, { { "I" }, {} }, { { "I", "E" }, { "E" } }, { { "I" }, {} }, { { "H" }, {} }, { { "H", "E" }, { "H", "E" } }, { { "H" }, { "H" } }, { { "F" }, { "F" } }, { { "E" }, {} }, { { "E" }, { "E" } }, { { "E" }, {} }, { { "E" }, { "E" } }, { { "E", "A" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "E" }, { "E" } }, { { "D" }, { "D" } }, { { "A" }, {} }, { { "A" }, {} } };
    // Seed with a random starting bi-gram.
    randomWord = startBiGram[indexGenerator(startBiGram.length)];
    int flag = 0;   // passed through to addCharacter; raised to 1 after repeated stalls
    int count = 0;  // consecutive iterations in which the word did not grow
    String previousWord;
    while (randomWord.length() != wordLength) {
        previousWord = randomWord;
        randomWord = addCharacter(startBiGram, wordLength, randomWord, lookupBiGram, nextCharLookup, flag);
        if (previousWord.equals(randomWord)) {
            // No character was added this round.
            count++;
        } else {
            // Progress made: clear the stall flag.
            flag = 0;
        }
        if (count == 5) {
            // Stalled 5 times in a row: raise the flag (and bump count so this
            // branch fires only once per stall streak).
            flag = 1;
            count++;
        } else if (count == 20) {
            // Still stuck after 20 stalls: discard the word and re-seed.
            randomWord = startBiGram[indexGenerator(startBiGram.length)];
            count = 0;
        }
    }
    return randomWord;
}
145349.9762208wildfly
/**
 * Processes the {@code <container-interceptors>} section of a deployment's
 * ejb-jar.xml assembly descriptor and records the resulting default-, class-
 * and method-level container-interceptor descriptions on each EJB component
 * description in the module.
 *
 * <p>Binding resolution follows standard interceptor-binding rules:
 * <ul>
 *   <li>{@code ejb-name="*"} bindings apply to every EJB; they may not name a
 *       method or specify an {@code <interceptor-order>}.</li>
 *   <li>Named bindings are grouped per EJB, then split into class-level
 *       (no method element) and method-level bindings.</li>
 *   <li>A binding with a total ordering ({@code <interceptor-order>}) replaces
 *       the combined default + class (+ method) list; at most one such binding
 *       is allowed per class and per method.</li>
 * </ul>
 *
 * @param phaseContext the deployment phase context supplying the deployment unit
 * @throws DeploymentUnitProcessingException if a referenced component class or
 *         bound method cannot be resolved, or if conflicting absolute orders
 *         are specified
 */
public void deploy(final DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    final EjbJarMetaData metaData = deploymentUnit.getAttachment(EjbDeploymentAttachmentKeys.EJB_JAR_METADATA);
    // Nothing to do without an ejb-jar.xml assembly descriptor.
    if (metaData == null || metaData.getAssemblyDescriptor() == null) {
        return;
    }
    final EEModuleDescription eeModuleDescription = deploymentUnit.getAttachment(Attachments.EE_MODULE_DESCRIPTION);
    final Module module = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.MODULE);
    final DeploymentReflectionIndex index = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.REFLECTION_INDEX);
    final List<ContainerInterceptorsMetaData> containerInterceptorConfigurations = metaData.getAssemblyDescriptor().getAny(ContainerInterceptorsMetaData.class);
    if (containerInterceptorConfigurations == null || containerInterceptorConfigurations.isEmpty()) {
        return;
    }
    // Only the first <container-interceptors> element is honored.
    final ContainerInterceptorsMetaData containerInterceptorsMetaData = containerInterceptorConfigurations.get(0);
    if (containerInterceptorsMetaData == null) {
        return;
    }
    final InterceptorBindingsMetaData containerInterceptorBindings = containerInterceptorsMetaData.getInterceptorBindings();
    if (containerInterceptorBindings == null || containerInterceptorBindings.isEmpty()) {
        return;
    }
    // Split the bindings into "applies to every EJB" (ejb-name="*") and per-EJB groups.
    final Map<String, List<InterceptorBindingMetaData>> bindingsPerEJB = new HashMap<String, List<InterceptorBindingMetaData>>();
    final List<InterceptorBindingMetaData> bindingsForAllEJBs = new ArrayList<InterceptorBindingMetaData>();
    for (final InterceptorBindingMetaData containerInterceptorBinding : containerInterceptorBindings) {
        if (containerInterceptorBinding.getEjbName().equals("*")) {
            // Wildcard (default) bindings cannot be method-scoped or ordered.
            if (containerInterceptorBinding.getMethod() != null) {
                throw EjbLogger.ROOT_LOGGER.defaultInterceptorsNotBindToMethod();
            }
            if (containerInterceptorBinding.getInterceptorOrder() != null) {
                throw EjbLogger.ROOT_LOGGER.defaultInterceptorsNotSpecifyOrder();
            }
            bindingsForAllEJBs.add(containerInterceptorBinding);
        } else {
            bindingsPerEJB.computeIfAbsent(containerInterceptorBinding.getEjbName(), k -> new ArrayList<InterceptorBindingMetaData>())
                    .add(containerInterceptorBinding);
        }
    }
    // Default (container-wide) interceptor descriptions shared by all EJBs.
    final List<InterceptorDescription> interceptorDescriptionsForAllEJBs = new ArrayList<InterceptorDescription>();
    for (final InterceptorBindingMetaData binding : bindingsForAllEJBs) {
        if (binding.getInterceptorClasses() != null) {
            for (final String clazz : binding.getInterceptorClasses()) {
                interceptorDescriptionsForAllEJBs.add(new InterceptorDescription(clazz));
            }
        }
    }
    for (final ComponentDescription componentDescription : eeModuleDescription.getComponentDescriptions()) {
        if (!(componentDescription instanceof EJBComponentDescription)) {
            continue;
        }
        final EJBComponentDescription ejbComponentDescription = (EJBComponentDescription) componentDescription;
        final Class<?> componentClass;
        try {
            componentClass = module.getClassLoader().loadClass(ejbComponentDescription.getComponentClassName());
        } catch (ClassNotFoundException e) {
            throw EjbLogger.ROOT_LOGGER.failToLoadComponentClass(e, ejbComponentDescription.getComponentClassName());
        }
        final List<InterceptorBindingMetaData> bindingsApplicableForCurrentEJB = bindingsPerEJB.get(ejbComponentDescription.getComponentName());
        // Method-level bindings keyed by the resolved java.lang.reflect.Method.
        final Map<Method, List<InterceptorBindingMetaData>> methodInterceptors = new HashMap<Method, List<InterceptorBindingMetaData>>();
        final List<InterceptorBindingMetaData> classLevelBindings = new ArrayList<InterceptorBindingMetaData>();
        boolean classLevelExcludeDefaultInterceptors = false;
        final Map<Method, Boolean> methodLevelExcludeDefaultInterceptors = new HashMap<Method, Boolean>();
        final Map<Method, Boolean> methodLevelExcludeClassInterceptors = new HashMap<Method, Boolean>();
        boolean classLevelAbsoluteOrder = false;
        final Map<Method, Boolean> methodLevelAbsoluteOrder = new HashMap<Method, Boolean>();
        if (bindingsApplicableForCurrentEJB != null) {
            for (final InterceptorBindingMetaData binding : bindingsApplicableForCurrentEJB) {
                if (binding.getMethod() == null) {
                    // Class-level binding for this EJB.
                    classLevelBindings.add(binding);
                    if (binding.isExcludeDefaultInterceptors()) {
                        classLevelExcludeDefaultInterceptors = true;
                    }
                    if (binding.isTotalOrdering()) {
                        if (classLevelAbsoluteOrder) {
                            throw EjbLogger.ROOT_LOGGER.twoEjbBindingsSpecifyAbsoluteOrder(componentClass.toString());
                        }
                        classLevelAbsoluteOrder = true;
                    }
                } else {
                    // Method-level binding: resolve the referenced method reflectively.
                    final NamedMethodMetaData methodData = binding.getMethod();
                    final ClassReflectionIndex classIndex = index.getClassIndex(componentClass);
                    Method resolvedMethod = null;
                    if (methodData.getMethodParams() == null) {
                        // No parameter list given: the name must match exactly one method.
                        final Collection<Method> methods = classIndex.getAllMethods(methodData.getMethodName());
                        if (methods.isEmpty()) {
                            throw EjbLogger.ROOT_LOGGER.failToFindMethodInEjbJarXml(componentClass.getName(), methodData.getMethodName());
                        } else if (methods.size() > 1) {
                            throw EjbLogger.ROOT_LOGGER.multipleMethodReferencedInEjbJarXml(methodData.getMethodName(), componentClass.getName());
                        }
                        resolvedMethod = methods.iterator().next();
                    } else {
                        // Parameter list given: find the overload whose parameter type names match.
                        final Collection<Method> methods = classIndex.getAllMethods(methodData.getMethodName(), methodData.getMethodParams().size());
                        for (final Method method : methods) {
                            boolean match = true;
                            for (int i = 0; i < method.getParameterCount(); ++i) {
                                if (!method.getParameterTypes()[i].getName().equals(methodData.getMethodParams().get(i))) {
                                    match = false;
                                    break;
                                }
                            }
                            if (match) {
                                resolvedMethod = method;
                                break;
                            }
                        }
                        if (resolvedMethod == null) {
                            throw EjbLogger.ROOT_LOGGER.failToFindMethodWithParameterTypes(componentClass.getName(), methodData.getMethodName(), methodData.getMethodParams());
                        }
                    }
                    methodInterceptors.computeIfAbsent(resolvedMethod, k -> new ArrayList<InterceptorBindingMetaData>()).add(binding);
                    if (binding.isExcludeDefaultInterceptors()) {
                        methodLevelExcludeDefaultInterceptors.put(resolvedMethod, true);
                    }
                    if (binding.isExcludeClassInterceptors()) {
                        methodLevelExcludeClassInterceptors.put(resolvedMethod, true);
                    }
                    if (binding.isTotalOrdering()) {
                        if (methodLevelAbsoluteOrder.containsKey(resolvedMethod)) {
                            throw EjbLogger.ROOT_LOGGER.twoEjbBindingsSpecifyAbsoluteOrder(resolvedMethod.toString());
                        }
                        methodLevelAbsoluteOrder.put(resolvedMethod, true);
                    }
                }
            }
        }
        ejbComponentDescription.setDefaultContainerInterceptors(interceptorDescriptionsForAllEJBs);
        if (classLevelExcludeDefaultInterceptors) {
            ejbComponentDescription.setExcludeDefaultContainerInterceptors(true);
        }
        final List<InterceptorDescription> classLevelInterceptors = new ArrayList<InterceptorDescription>();
        if (classLevelAbsoluteOrder) {
            // An absolute <interceptor-order> replaces the default + class-level lists outright.
            for (final InterceptorBindingMetaData binding : classLevelBindings) {
                if (binding.isTotalOrdering()) {
                    for (final String interceptor : binding.getInterceptorOrder()) {
                        classLevelInterceptors.add(new InterceptorDescription(interceptor));
                    }
                    break;
                }
            }
            ejbComponentDescription.setExcludeDefaultContainerInterceptors(true);
        } else {
            for (final InterceptorBindingMetaData binding : classLevelBindings) {
                if (binding.getInterceptorClasses() != null) {
                    for (final String interceptor : binding.getInterceptorClasses()) {
                        classLevelInterceptors.add(new InterceptorDescription(interceptor));
                    }
                }
            }
        }
        ejbComponentDescription.setClassLevelContainerInterceptors(classLevelInterceptors);
        // Compute the effective interceptor list for each explicitly bound method.
        for (final Map.Entry<Method, List<InterceptorBindingMetaData>> entry : methodInterceptors.entrySet()) {
            final Method method = entry.getKey();
            final List<InterceptorBindingMetaData> methodBindings = entry.getValue();
            final boolean totalOrder = methodLevelAbsoluteOrder.containsKey(method);
            final MethodIdentifier methodIdentifier = MethodIdentifier.getIdentifierForMethod(method);
            Boolean excludeDefaultInterceptors = methodLevelExcludeDefaultInterceptors.get(method);
            excludeDefaultInterceptors = excludeDefaultInterceptors == null ? Boolean.FALSE : excludeDefaultInterceptors;
            if (!excludeDefaultInterceptors) {
                // Inherit the class/component-level exclusion if the method does not opt out itself.
                excludeDefaultInterceptors = ejbComponentDescription.isExcludeDefaultContainerInterceptors() || ejbComponentDescription.isExcludeDefaultContainerInterceptors(methodIdentifier);
            }
            Boolean excludeClassInterceptors = methodLevelExcludeClassInterceptors.get(method);
            excludeClassInterceptors = excludeClassInterceptors == null ? Boolean.FALSE : excludeClassInterceptors;
            if (!excludeClassInterceptors) {
                excludeClassInterceptors = ejbComponentDescription.isExcludeClassLevelContainerInterceptors(methodIdentifier);
            }
            final List<InterceptorDescription> methodLevelInterceptors = new ArrayList<InterceptorDescription>();
            if (totalOrder) {
                // Absolute order: only the ordered list applies to this method.
                for (final InterceptorBindingMetaData binding : methodBindings) {
                    if (binding.isTotalOrdering()) {
                        for (final String interceptor : binding.getInterceptorOrder()) {
                            methodLevelInterceptors.add(new InterceptorDescription(interceptor));
                        }
                    }
                }
            } else {
                // Default interceptors first, then class-level, then method-level ones.
                if (!excludeDefaultInterceptors) {
                    methodLevelInterceptors.addAll(interceptorDescriptionsForAllEJBs);
                }
                if (!excludeClassInterceptors) {
                    methodLevelInterceptors.addAll(classLevelInterceptors);
                }
                for (final InterceptorBindingMetaData binding : methodBindings) {
                    if (binding.getInterceptorClasses() != null) {
                        for (final String interceptor : binding.getInterceptorClasses()) {
                            methodLevelInterceptors.add(new InterceptorDescription(interceptor));
                        }
                    }
                }
            }
            // NOTE(review): these exclusions are applied unconditionally for every
            // bound method, regardless of the exclude flags computed above. Since
            // methodLevelInterceptors already embeds the default/class chains as
            // appropriate, the full list likely replaces them — but confirm this
            // is intentional rather than a missing `if (exclude...)` guard.
            ejbComponentDescription.excludeClassLevelContainerInterceptors(methodIdentifier);
            ejbComponentDescription.excludeDefaultContainerInterceptors(methodIdentifier);
            ejbComponentDescription.setMethodContainerInterceptors(methodIdentifier, methodLevelInterceptors);
        }
    }
}
156530.721459elasticsearch
/**
 * Builds the named XContent parser registrations for every ML inference
 * object type: feature pre-processors, trained models, output aggregators,
 * model locations, inference configurations and their updates, optimized
 * inference models, and NLP tokenizations and their updates.
 *
 * <p>Most object types are registered in two variants: a leniently parsed
 * one and a strictly parsed one (presumably lenient for previously persisted
 * documents and strict for user-supplied content — confirm against callers).
 *
 * @return a mutable list containing one {@link NamedXContentRegistry.Entry}
 *         per registered parser
 */
public List<NamedXContentRegistry.Entry> getNamedXContentParsers() {
    List<NamedXContentRegistry.Entry> namedXContent = new ArrayList<>();
    addPreProcessorEntries(namedXContent);
    addTrainedModelAndAggregatorEntries(namedXContent);
    addModelLocationEntries(namedXContent);
    addInferenceConfigEntries(namedXContent);
    addInferenceConfigUpdateEntries(namedXContent);
    addInferenceModelEntries(namedXContent);
    addTokenizationEntries(namedXContent);
    return namedXContent;
}

/** Lenient and strict parsers for feature pre-processors. */
private static void addPreProcessorEntries(List<NamedXContentRegistry.Entry> namedXContent) {
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedPreProcessor.class, OneHotEncoding.NAME, (p, c) -> OneHotEncoding.fromXContentLenient(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedPreProcessor.class, TargetMeanEncoding.NAME, (p, c) -> TargetMeanEncoding.fromXContentLenient(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedPreProcessor.class, FrequencyEncoding.NAME, (p, c) -> FrequencyEncoding.fromXContentLenient(p, (PreProcessor.PreProcessorParseContext) c)));
    // CustomWordEmbedding does not take a parse context, unlike the other pre-processors.
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedPreProcessor.class, CustomWordEmbedding.NAME, (p, c) -> CustomWordEmbedding.fromXContentLenient(p)));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedPreProcessor.class, NGram.NAME, (p, c) -> NGram.fromXContentLenient(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedPreProcessor.class, Multi.NAME, (p, c) -> Multi.fromXContentLenient(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedPreProcessor.class, OneHotEncoding.NAME, (p, c) -> OneHotEncoding.fromXContentStrict(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedPreProcessor.class, TargetMeanEncoding.NAME, (p, c) -> TargetMeanEncoding.fromXContentStrict(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedPreProcessor.class, FrequencyEncoding.NAME, (p, c) -> FrequencyEncoding.fromXContentStrict(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedPreProcessor.class, CustomWordEmbedding.NAME, (p, c) -> CustomWordEmbedding.fromXContentStrict(p)));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedPreProcessor.class, NGram.NAME, (p, c) -> NGram.fromXContentStrict(p, (PreProcessor.PreProcessorParseContext) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedPreProcessor.class, Multi.NAME, (p, c) -> Multi.fromXContentStrict(p, (PreProcessor.PreProcessorParseContext) c)));
}

/** Lenient and strict parsers for trained model types and ensemble output aggregators. */
private static void addTrainedModelAndAggregatorEntries(List<NamedXContentRegistry.Entry> namedXContent) {
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedTrainedModel.class, Tree.NAME, Tree::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedTrainedModel.class, Ensemble.NAME, Ensemble::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedTrainedModel.class, LangIdentNeuralNetwork.NAME, LangIdentNeuralNetwork::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedOutputAggregator.class, WeightedMode.NAME, WeightedMode::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedOutputAggregator.class, WeightedSum.NAME, WeightedSum::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedOutputAggregator.class, LogisticRegression.NAME, LogisticRegression::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedOutputAggregator.class, Exponent.NAME, Exponent::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedTrainedModel.class, Tree.NAME, Tree::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedTrainedModel.class, Ensemble.NAME, Ensemble::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedTrainedModel.class, LangIdentNeuralNetwork.NAME, LangIdentNeuralNetwork::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedOutputAggregator.class, WeightedMode.NAME, WeightedMode::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedOutputAggregator.class, WeightedSum.NAME, WeightedSum::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedOutputAggregator.class, LogisticRegression.NAME, LogisticRegression::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedOutputAggregator.class, Exponent.NAME, Exponent::fromXContentStrict));
}

/** Lenient and strict parsers for trained model storage locations. */
private static void addModelLocationEntries(List<NamedXContentRegistry.Entry> namedXContent) {
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedTrainedModelLocation.class, IndexLocation.INDEX, IndexLocation::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedTrainedModelLocation.class, IndexLocation.INDEX, IndexLocation::fromXContentStrict));
}

/** Lenient and strict parsers for inference configurations (classic ML and NLP task types). */
private static void addInferenceConfigEntries(List<NamedXContentRegistry.Entry> namedXContent) {
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, ClassificationConfig.NAME, ClassificationConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, ClassificationConfig.NAME, ClassificationConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, RegressionConfig.NAME, RegressionConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, RegressionConfig.NAME, RegressionConfig::fromXContentStrict));
    // NLP config names are plain Strings and must be wrapped in ParseField here.
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(NerConfig.NAME), NerConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(NerConfig.NAME), NerConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(FillMaskConfig.NAME), FillMaskConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(FillMaskConfig.NAME), FillMaskConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(TextExpansionConfig.NAME), TextExpansionConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(TextExpansionConfig.NAME), TextExpansionConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(TextClassificationConfig.NAME), TextClassificationConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(TextClassificationConfig.NAME), TextClassificationConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(PassThroughConfig.NAME), PassThroughConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(PassThroughConfig.NAME), PassThroughConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(TextEmbeddingConfig.NAME), TextEmbeddingConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(TextEmbeddingConfig.NAME), TextEmbeddingConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(ZeroShotClassificationConfig.NAME), ZeroShotClassificationConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(ZeroShotClassificationConfig.NAME), ZeroShotClassificationConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(QuestionAnsweringConfig.NAME), QuestionAnsweringConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(QuestionAnsweringConfig.NAME), QuestionAnsweringConfig::fromXContentLenient));
    namedXContent.add(new NamedXContentRegistry.Entry(StrictlyParsedInferenceConfig.class, new ParseField(TextSimilarityConfig.NAME), TextSimilarityConfig::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(LenientlyParsedInferenceConfig.class, new ParseField(TextSimilarityConfig.NAME), TextSimilarityConfig::fromXContentLenient));
}

/** Strict-only parsers for per-request inference configuration updates. */
private static void addInferenceConfigUpdateEntries(List<NamedXContentRegistry.Entry> namedXContent) {
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, ClassificationConfigUpdate.NAME, ClassificationConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(FillMaskConfigUpdate.NAME), FillMaskConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(NerConfigUpdate.NAME), NerConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(PassThroughConfigUpdate.NAME), PassThroughConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, RegressionConfigUpdate.NAME, RegressionConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(TextExpansionConfigUpdate.NAME), TextExpansionConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(TextClassificationConfigUpdate.NAME), TextClassificationConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(TextEmbeddingConfigUpdate.NAME), TextEmbeddingConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(ZeroShotClassificationConfigUpdate.NAME), ZeroShotClassificationConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(QuestionAnsweringConfigUpdate.NAME), QuestionAnsweringConfigUpdate::fromXContentStrict));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceConfigUpdate.class, new ParseField(TextSimilarityConfigUpdate.NAME), TextSimilarityConfigUpdate::fromXContentStrict));
}

/** Parsers for the inference-optimized model representations. */
private static void addInferenceModelEntries(List<NamedXContentRegistry.Entry> namedXContent) {
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceModel.class, Ensemble.NAME, EnsembleInferenceModel::fromXContent));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceModel.class, Tree.NAME, TreeInferenceModel::fromXContent));
    namedXContent.add(new NamedXContentRegistry.Entry(InferenceModel.class, LangIdentNeuralNetwork.NAME, LangIdentNeuralNetwork::fromXContentLenient));
}

/** Parsers for NLP tokenization settings and their updates; the context carries the lenient flag. */
private static void addTokenizationEntries(List<NamedXContentRegistry.Entry> namedXContent) {
    namedXContent.add(new NamedXContentRegistry.Entry(Tokenization.class, BertJapaneseTokenization.NAME, (p, c) -> BertJapaneseTokenization.fromXContent(p, (boolean) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(Tokenization.class, BertTokenization.NAME, (p, c) -> BertTokenization.fromXContent(p, (boolean) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(Tokenization.class, MPNetTokenization.NAME, (p, c) -> MPNetTokenization.fromXContent(p, (boolean) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(Tokenization.class, new ParseField(RobertaTokenization.NAME), (p, c) -> RobertaTokenization.fromXContent(p, (boolean) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(Tokenization.class, new ParseField(XLMRobertaTokenization.NAME), (p, c) -> XLMRobertaTokenization.fromXContent(p, (boolean) c)));
    namedXContent.add(new NamedXContentRegistry.Entry(TokenizationUpdate.class, BertJapaneseTokenizationUpdate.NAME, (p, c) -> BertJapaneseTokenizationUpdate.fromXContent(p)));
    namedXContent.add(new NamedXContentRegistry.Entry(TokenizationUpdate.class, BertTokenizationUpdate.NAME, (p, c) -> BertTokenizationUpdate.fromXContent(p)));
    namedXContent.add(new NamedXContentRegistry.Entry(TokenizationUpdate.class, MPNetTokenizationUpdate.NAME, (p, c) -> MPNetTokenizationUpdate.fromXContent(p)));
    namedXContent.add(new NamedXContentRegistry.Entry(TokenizationUpdate.class, RobertaTokenizationUpdate.NAME, (p, c) -> RobertaTokenizationUpdate.fromXContent(p)));
    namedXContent.add(new NamedXContentRegistry.Entry(TokenizationUpdate.class, XLMRobertaTokenizationUpdate.NAME, (p, c) -> XLMRobertaTokenizationUpdate.fromXContent(p)));
}
156795.424412elasticsearch
/**
 * Builds the field mappings for the main security system index as a JSON
 * {@link XContentBuilder}.
 *
 * <p>The {@code _meta} section records both the legacy security version
 * string and the numeric mapping version id; {@code "dynamic": "strict"}
 * rejects documents with unmapped fields. Several document kinds appear to
 * share this index (note the {@code doc_type} discriminator field below), so
 * the single {@code properties} section mixes user, role, API-key and token
 * fields — NOTE(review): the grouping comments below are inferred from field
 * names; confirm against the writers of this index.
 *
 * @param mappingVersion the mapping version to build; fields introduced in
 *                       later versions are gated on {@code onOrAfter} checks
 * @return the completed mappings builder
 * @throws UncheckedIOException if the builder fails with an {@link IOException}
 */
private XContentBuilder getMainIndexMappings(SecurityMainIndexMappingVersion mappingVersion) {
    try {
        final XContentBuilder builder = jsonBuilder();
        builder.startObject();
        {
            // _meta: version markers consumed by the system-index upgrade machinery.
            builder.startObject("_meta");
            builder.field(SECURITY_VERSION_STRING, BWC_MAPPINGS_VERSION);
            builder.field(SystemIndexDescriptor.VERSION_META_KEY, mappingVersion.id);
            builder.endObject();
            // Reject any document containing fields not declared below.
            builder.field("dynamic", "strict");
            builder.startObject("properties");
            {
                // -- user / role-mapping style fields (inferred from names) --
                builder.startObject("username");
                builder.field("type", "keyword");
                builder.endObject();
                builder.startObject("roles");
                builder.field("type", "keyword");
                builder.endObject();
                builder.startObject("role_templates");
                {
                    builder.startObject("properties");
                    {
                        builder.startObject("template");
                        builder.field("type", "text");
                        builder.endObject();
                        builder.startObject("format");
                        builder.field("type", "keyword");
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
                // Password hash: stored only, deliberately not indexed or doc-valued.
                builder.startObject("password");
                builder.field("type", "keyword");
                builder.field("index", false);
                builder.field("doc_values", false);
                builder.endObject();
                builder.startObject("full_name");
                builder.field("type", "text");
                builder.endObject();
                builder.startObject("email");
                builder.field("type", "text");
                builder.field("analyzer", "email");
                builder.endObject();
                // Opaque metadata object: stored but not dynamically mapped.
                builder.startObject("metadata");
                builder.field("type", "object");
                builder.field("dynamic", false);
                builder.endObject();
                builder.startObject("metadata_flattened");
                builder.field("type", "flattened");
                builder.endObject();
                builder.startObject("enabled");
                builder.field("type", "boolean");
                builder.endObject();
                // -- role-descriptor style fields: cluster/index/application privileges --
                builder.startObject("cluster");
                builder.field("type", "keyword");
                builder.endObject();
                builder.startObject("indices");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("field_security");
                        {
                            builder.startObject("properties");
                            {
                                builder.startObject("grant");
                                builder.field("type", "keyword");
                                builder.endObject();
                                builder.startObject("except");
                                builder.field("type", "keyword");
                                builder.endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                        builder.startObject("names");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("privileges");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("query");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("allow_restricted_indices");
                        builder.field("type", "boolean");
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
                // Same shape as "indices" plus a "clusters" field for remote targets.
                builder.startObject("remote_indices");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("field_security");
                        {
                            builder.startObject("properties");
                            {
                                builder.startObject("grant");
                                builder.field("type", "keyword");
                                builder.endObject();
                                builder.startObject("except");
                                builder.field("type", "keyword");
                                builder.endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                        builder.startObject("names");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("privileges");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("query");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("allow_restricted_indices");
                        builder.field("type", "boolean");
                        builder.endObject();
                        builder.startObject("clusters");
                        builder.field("type", "keyword");
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
                // "remote_cluster" only exists from ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS on.
                if (mappingVersion.onOrAfter(SecurityMainIndexMappingVersion.ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS)) {
                    builder.startObject("remote_cluster");
                    {
                        builder.field("type", "object");
                        builder.startObject("properties");
                        {
                            builder.startObject("clusters");
                            builder.field("type", "keyword");
                            builder.endObject();
                            builder.startObject("privileges");
                            builder.field("type", "keyword");
                            builder.endObject();
                        }
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.startObject("applications");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("application");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("privileges");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("resources");
                        builder.field("type", "keyword");
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
                builder.startObject("application");
                builder.field("type", "keyword");
                builder.endObject();
                // Global (cluster-wide) privileges: application management and profile write.
                builder.startObject("global");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("application");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                builder.startObject("manage");
                                {
                                    builder.field("type", "object");
                                    builder.startObject("properties");
                                    {
                                        builder.startObject("applications");
                                        builder.field("type", "keyword");
                                        builder.endObject();
                                    }
                                    builder.endObject();
                                }
                                builder.endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                        builder.startObject("profile");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                builder.startObject("write");
                                {
                                    builder.field("type", "object");
                                    builder.startObject("properties");
                                    {
                                        builder.startObject("applications");
                                        builder.field("type", "keyword");
                                        builder.endObject();
                                    }
                                    builder.endObject();
                                }
                                builder.endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
                builder.startObject("name");
                builder.field("type", "keyword");
                builder.endObject();
                // "description" only exists from ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS on.
                if (mappingVersion.onOrAfter(SecurityMainIndexMappingVersion.ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS)) {
                    builder.startObject("description");
                    builder.field("type", "text");
                    builder.endObject();
                }
                builder.startObject("run_as");
                builder.field("type", "keyword");
                builder.endObject();
                // Discriminator distinguishing the document kinds stored in this index.
                builder.startObject("doc_type");
                builder.field("type", "keyword");
                builder.endObject();
                builder.startObject("type");
                builder.field("type", "keyword");
                builder.endObject();
                builder.startObject("actions");
                builder.field("type", "keyword");
                builder.endObject();
                // -- API-key style fields (inferred from names) --
                builder.startObject("expiration_time");
                builder.field("type", "date");
                builder.field("format", "epoch_millis");
                builder.endObject();
                builder.startObject("creation_time");
                builder.field("type", "date");
                builder.field("format", "epoch_millis");
                builder.endObject();
                builder.startObject("invalidation_time");
                builder.field("type", "date");
                builder.field("format", "epoch_millis");
                builder.endObject();
                // API key hash: stored only, like "password" above.
                builder.startObject("api_key_hash");
                builder.field("type", "keyword");
                builder.field("index", false);
                builder.field("doc_values", false);
                builder.endObject();
                builder.startObject("api_key_invalidated");
                builder.field("type", "boolean");
                builder.endObject();
                // Role descriptors are stored verbatim but not indexed at all ("enabled": false).
                builder.startObject("role_descriptors");
                builder.field("type", "object");
                builder.field("enabled", false);
                builder.endObject();
                builder.startObject("limited_by_role_descriptors");
                builder.field("type", "object");
                builder.field("enabled", false);
                builder.endObject();
                builder.startObject("version");
                builder.field("type", "integer");
                builder.endObject();
                // Creator of the API key, including its realm/domain information.
                builder.startObject("creator");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("principal");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("full_name");
                        builder.field("type", "text");
                        builder.endObject();
                        builder.startObject("email");
                        builder.field("type", "text");
                        builder.field("analyzer", "email");
                        builder.endObject();
                        builder.startObject("metadata");
                        builder.field("type", "object");
                        builder.field("dynamic", false);
                        builder.endObject();
                        builder.startObject("realm");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("realm_type");
                        builder.field("type", "keyword");
                        builder.endObject();
                        // Shared helper defines the nested realm-domain mapping.
                        defineRealmDomain(builder, "realm_domain");
                    }
                    builder.endObject();
                }
                builder.endObject();
                builder.startObject("rules");
                builder.field("type", "object");
                builder.field("dynamic", false);
                builder.endObject();
                // -- token-service style fields (inferred from names) --
                builder.startObject("refresh_token");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("token");
                        builder.field("type", "keyword");
                        builder.endObject();
                        builder.startObject("refreshed");
                        builder.field("type", "boolean");
                        builder.endObject();
                        builder.startObject("refresh_time");
                        builder.field("type", "date");
                        builder.field("format", "epoch_millis");
                        builder.endObject();
                        // Encrypted replacement tokens written when a refresh supersedes this one.
                        builder.startObject("superseding");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                builder.startObject("encrypted_tokens");
                                builder.field("type", "binary");
                                builder.endObject();
                                builder.startObject("encryption_iv");
                                builder.field("type", "binary");
                                builder.endObject();
                                builder.startObject("encryption_salt");
                                builder.field("type", "binary");
                                builder.endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                        builder.startObject("invalidated");
                        builder.field("type", "boolean");
                        builder.endObject();
                        builder.startObject("client");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                builder.startObject("type");
                                builder.field("type", "keyword");
                                builder.endObject();
                                builder.startObject("user");
                                builder.field("type", "keyword");
                                builder.endObject();
                                builder.startObject("realm");
                                builder.field("type", "keyword");
                                builder.endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
                builder.startObject("access_token");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("user_token");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                builder.startObject("id");
                                builder.field("type", "keyword");
                                builder.endObject();
                                builder.startObject("expiration_time");
                                builder.field("type", "date");
                                builder.field("format", "epoch_millis");
                                builder.endObject();
                                builder.startObject("version");
                                builder.field("type", "integer");
                                builder.endObject();
                                builder.startObject("metadata");
                                builder.field("type", "object");
                                builder.field("dynamic", false);
                                builder.endObject();
                                // Serialized authentication object stored as opaque binary.
                                builder.startObject("authentication");
                                builder.field("type", "binary");
                                builder.endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                        builder.startObject("invalidated");
                        builder.field("type", "boolean");
                        builder.endObject();
                        builder.startObject("realm");
                        builder.field("type", "keyword");
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
            }
            builder.endObject();
        }
        builder.endObject();
        return builder;
    } catch (IOException e) {
        // Broken mappings would leave security unusable: log at fatal and rethrow
        // as unchecked so startup fails loudly rather than continuing.
        logger.fatal("Failed to build " + MAIN_INDEX_CONCRETE_NAME + " index mappings", e);
        throw new UncheckedIOException("Failed to build " + MAIN_INDEX_CONCRETE_NAME + " index mappings", e);
    }
}
1510965.691379hadoop
/**
 * Registers the mapping from legacy (pre-MRv2) configuration key names to their
 * current replacements, so that old job configurations keep working and emit
 * deprecation warnings instead of being silently ignored.
 *
 * <p>Notes for maintainers:
 * <ul>
 *   <li>The key strings below are part of the public compatibility contract —
 *       do not "fix" apparent typos such as the doubled
 *       {@code "yarn.app.mapreduce.yarn.app.mapreduce..."} prefix; they must
 *       match what historical releases actually read.</li>
 *   <li>{@code "mapred.merge.recordsBeforeProgress"} is intentionally listed
 *       twice (mapping to both {@code RECORDS_BEFORE_PROGRESS} and
 *       {@code COMBINE_RECORDS_BEFORE_PROGRESS}); behavior on duplicates is
 *       defined by {@code Configuration.addDeprecations} — confirm before
 *       deduplicating.</li>
 * </ul>
 */
private static void addDeprecatedKeys() {
    // One bulk registration call; the array below is grouped roughly as:
    // cluster/JT/TT settings, per-job settings, task settings, map-side,
    // reduce-side, client settings, lib.input/output/partition classes,
    // pipes, and security ACLs.
    Configuration.addDeprecations(new DeprecationDelta[] { new DeprecationDelta("mapred.temp.dir", MRConfig.TEMP_DIR), new DeprecationDelta("mapred.local.dir", MRConfig.LOCAL_DIR), new DeprecationDelta("mapred.cluster.map.memory.mb", MRConfig.MAPMEMORY_MB), new DeprecationDelta("mapred.cluster.reduce.memory.mb", MRConfig.REDUCEMEMORY_MB), new DeprecationDelta("mapred.acls.enabled", MRConfig.MR_ACLS_ENABLED), new DeprecationDelta("mapred.cluster.max.map.memory.mb", JTConfig.JT_MAX_MAPMEMORY_MB), new DeprecationDelta("mapred.cluster.max.reduce.memory.mb", JTConfig.JT_MAX_REDUCEMEMORY_MB), new DeprecationDelta("mapred.system.dir", JTConfig.JT_SYSTEM_DIR), new DeprecationDelta("mapred.job.tracker", JTConfig.JT_IPC_ADDRESS), new DeprecationDelta("mapred.job.tracker.persist.jobstatus.active", JTConfig.JT_PERSIST_JOBSTATUS), new DeprecationDelta("mapred.permissions.supergroup", MRConfig.MR_SUPERGROUP), new DeprecationDelta("mapred.task.cache.levels", JTConfig.JT_TASKCACHE_LEVELS), new DeprecationDelta("mapred.job.tracker.retire.jobs", JTConfig.JT_RETIREJOBS), new DeprecationDelta("mapred.tasktracker.map.tasks.maximum", TTConfig.TT_MAP_SLOTS), new DeprecationDelta("mapred.tasktracker.memory_calculator_plugin", TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN), new DeprecationDelta("mapred.tasktracker.memorycalculatorplugin", TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN), new DeprecationDelta("yarn.app.mapreduce.yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts", MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS), new DeprecationDelta("job.end.notification.url", MRJobConfig.MR_JOB_END_NOTIFICATION_URL), new DeprecationDelta("job.end.retry.attempts", MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS), new DeprecationDelta("job.end.retry.interval", MRJobConfig.MR_JOB_END_RETRY_INTERVAL), new DeprecationDelta("mapred.committer.job.setup.cleanup.needed", MRJobConfig.SETUP_CLEANUP_NEEDED), new DeprecationDelta("mapred.jar", MRJobConfig.JAR), new DeprecationDelta("mapred.job.id", 
MRJobConfig.ID), new DeprecationDelta("mapred.job.name", MRJobConfig.JOB_NAME), new DeprecationDelta("mapred.job.priority", MRJobConfig.PRIORITY), new DeprecationDelta("mapred.job.queue.name", MRJobConfig.QUEUE_NAME), new DeprecationDelta("mapred.job.reuse.jvm.num.tasks", MRJobConfig.JVM_NUMTASKS_TORUN), new DeprecationDelta("mapred.map.tasks", MRJobConfig.NUM_MAPS), new DeprecationDelta("mapred.max.tracker.failures", MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER), new DeprecationDelta("mapred.reduce.slowstart.completed.maps", MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART), new DeprecationDelta("mapred.reduce.tasks", MRJobConfig.NUM_REDUCES), new DeprecationDelta("mapred.skip.on", MRJobConfig.SKIP_RECORDS), new DeprecationDelta("mapred.skip.out.dir", MRJobConfig.SKIP_OUTDIR), new DeprecationDelta("mapred.speculative.execution.slowTaskThreshold", MRJobConfig.SPECULATIVE_SLOWTASK_THRESHOLD), new DeprecationDelta("mapred.speculative.execution.speculativeCap", MRJobConfig.SPECULATIVECAP_RUNNING_TASKS), new DeprecationDelta("job.local.dir", MRJobConfig.JOB_LOCAL_DIR), new DeprecationDelta("mapreduce.inputformat.class", MRJobConfig.INPUT_FORMAT_CLASS_ATTR), new DeprecationDelta("mapreduce.map.class", MRJobConfig.MAP_CLASS_ATTR), new DeprecationDelta("mapreduce.combine.class", MRJobConfig.COMBINE_CLASS_ATTR), new DeprecationDelta("mapreduce.reduce.class", MRJobConfig.REDUCE_CLASS_ATTR), new DeprecationDelta("mapreduce.outputformat.class", MRJobConfig.OUTPUT_FORMAT_CLASS_ATTR), new DeprecationDelta("mapreduce.partitioner.class", MRJobConfig.PARTITIONER_CLASS_ATTR), new DeprecationDelta("mapred.job.classpath.archives", MRJobConfig.CLASSPATH_ARCHIVES), new DeprecationDelta("mapred.job.classpath.files", MRJobConfig.CLASSPATH_FILES), new DeprecationDelta("mapred.cache.files", MRJobConfig.CACHE_FILES), new DeprecationDelta("mapred.cache.archives", MRJobConfig.CACHE_ARCHIVES), new DeprecationDelta("mapred.cache.localFiles", MRJobConfig.CACHE_LOCALFILES), new 
DeprecationDelta("mapred.cache.localArchives", MRJobConfig.CACHE_LOCALARCHIVES), new DeprecationDelta("mapred.cache.files.filesizes", MRJobConfig.CACHE_FILES_SIZES), new DeprecationDelta("mapred.cache.archives.filesizes", MRJobConfig.CACHE_ARCHIVES_SIZES), new DeprecationDelta("mapred.cache.files.timestamps", MRJobConfig.CACHE_FILE_TIMESTAMPS), new DeprecationDelta("mapred.cache.archives.timestamps", MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS), new DeprecationDelta("mapred.working.dir", MRJobConfig.WORKING_DIR), new DeprecationDelta("user.name", MRJobConfig.USER_NAME), new DeprecationDelta("mapred.output.key.class", MRJobConfig.OUTPUT_KEY_CLASS), new DeprecationDelta("mapred.output.value.class", MRJobConfig.OUTPUT_VALUE_CLASS), new DeprecationDelta("mapred.output.value.groupfn.class", MRJobConfig.GROUP_COMPARATOR_CLASS), new DeprecationDelta("mapred.output.key.comparator.class", MRJobConfig.KEY_COMPARATOR), new DeprecationDelta("io.sort.factor", MRJobConfig.IO_SORT_FACTOR), new DeprecationDelta("io.sort.mb", MRJobConfig.IO_SORT_MB), new DeprecationDelta("keep.failed.task.files", MRJobConfig.PRESERVE_FAILED_TASK_FILES), new DeprecationDelta("keep.task.files.pattern", MRJobConfig.PRESERVE_FILES_PATTERN), new DeprecationDelta("mapred.debug.out.lines", MRJobConfig.TASK_DEBUGOUT_LINES), new DeprecationDelta("mapred.merge.recordsBeforeProgress", MRJobConfig.RECORDS_BEFORE_PROGRESS), new DeprecationDelta("mapred.merge.recordsBeforeProgress", MRJobConfig.COMBINE_RECORDS_BEFORE_PROGRESS), new DeprecationDelta("mapred.skip.attempts.to.start.skipping", MRJobConfig.SKIP_START_ATTEMPTS), new DeprecationDelta("mapred.task.id", MRJobConfig.TASK_ATTEMPT_ID), new DeprecationDelta("mapred.task.is.map", MRJobConfig.TASK_ISMAP), new DeprecationDelta("mapred.task.partition", MRJobConfig.TASK_PARTITION), new DeprecationDelta("mapred.task.profile", MRJobConfig.TASK_PROFILE), new DeprecationDelta("mapred.task.profile.maps", MRJobConfig.NUM_MAP_PROFILES), new 
DeprecationDelta("mapred.task.profile.reduces", MRJobConfig.NUM_REDUCE_PROFILES), new DeprecationDelta("mapred.task.timeout", MRJobConfig.TASK_TIMEOUT), new DeprecationDelta("mapred.tip.id", MRJobConfig.TASK_ID), new DeprecationDelta("mapred.work.output.dir", MRJobConfig.TASK_OUTPUT_DIR), new DeprecationDelta("mapred.userlog.limit.kb", MRJobConfig.TASK_USERLOG_LIMIT), new DeprecationDelta("mapred.task.profile.params", MRJobConfig.TASK_PROFILE_PARAMS), new DeprecationDelta("io.sort.spill.percent", MRJobConfig.MAP_SORT_SPILL_PERCENT), new DeprecationDelta("map.input.file", MRJobConfig.MAP_INPUT_FILE), new DeprecationDelta("map.input.length", MRJobConfig.MAP_INPUT_PATH), new DeprecationDelta("map.input.start", MRJobConfig.MAP_INPUT_START), new DeprecationDelta("mapred.job.map.memory.mb", MRJobConfig.MAP_MEMORY_MB), new DeprecationDelta("mapred.map.child.env", MRJobConfig.MAP_ENV), new DeprecationDelta("mapred.map.child.java.opts", MRJobConfig.MAP_JAVA_OPTS), new DeprecationDelta("mapred.map.max.attempts", MRJobConfig.MAP_MAX_ATTEMPTS), new DeprecationDelta("mapred.map.task.debug.script", MRJobConfig.MAP_DEBUG_SCRIPT), new DeprecationDelta("mapred.map.tasks.speculative.execution", MRJobConfig.MAP_SPECULATIVE), new DeprecationDelta("mapred.max.map.failures.percent", MRJobConfig.MAP_FAILURES_MAX_PERCENT), new DeprecationDelta("mapred.skip.map.auto.incr.proc.count", MRJobConfig.MAP_SKIP_INCR_PROC_COUNT), new DeprecationDelta("mapred.skip.map.max.skip.records", MRJobConfig.MAP_SKIP_MAX_RECORDS), new DeprecationDelta("min.num.spills.for.combine", MRJobConfig.MAP_COMBINE_MIN_SPILLS), new DeprecationDelta("mapred.compress.map.output", MRJobConfig.MAP_OUTPUT_COMPRESS), new DeprecationDelta("mapred.map.output.compression.codec", MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC), new DeprecationDelta("mapred.mapoutput.key.class", MRJobConfig.MAP_OUTPUT_KEY_CLASS), new DeprecationDelta("mapred.mapoutput.value.class", MRJobConfig.MAP_OUTPUT_VALUE_CLASS), new 
DeprecationDelta("map.output.key.field.separator", MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPARATOR), new DeprecationDelta("mapred.map.child.log.level", MRJobConfig.MAP_LOG_LEVEL), new DeprecationDelta("mapred.inmem.merge.threshold", MRJobConfig.REDUCE_MERGE_INMEM_THRESHOLD), new DeprecationDelta("mapred.job.reduce.input.buffer.percent", MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT), new DeprecationDelta("mapred.job.reduce.markreset.buffer.percent", MRJobConfig.REDUCE_MARKRESET_BUFFER_PERCENT), new DeprecationDelta("mapred.job.reduce.memory.mb", MRJobConfig.REDUCE_MEMORY_MB), new DeprecationDelta("mapred.job.reduce.total.mem.bytes", MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES), new DeprecationDelta("mapred.job.shuffle.input.buffer.percent", MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT), new DeprecationDelta("mapred.job.shuffle.merge.percent", MRJobConfig.SHUFFLE_MERGE_PERCENT), new DeprecationDelta("mapred.max.reduce.failures.percent", MRJobConfig.REDUCE_FAILURES_MAXPERCENT), new DeprecationDelta("mapred.reduce.child.env", MRJobConfig.REDUCE_ENV), new DeprecationDelta("mapred.reduce.child.java.opts", MRJobConfig.REDUCE_JAVA_OPTS), new DeprecationDelta("mapred.reduce.max.attempts", MRJobConfig.REDUCE_MAX_ATTEMPTS), new DeprecationDelta("mapred.reduce.parallel.copies", MRJobConfig.SHUFFLE_PARALLEL_COPIES), new DeprecationDelta("mapred.reduce.task.debug.script", MRJobConfig.REDUCE_DEBUG_SCRIPT), new DeprecationDelta("mapred.reduce.tasks.speculative.execution", MRJobConfig.REDUCE_SPECULATIVE), new DeprecationDelta("mapred.shuffle.connect.timeout", MRJobConfig.SHUFFLE_CONNECT_TIMEOUT), new DeprecationDelta("mapred.shuffle.read.timeout", MRJobConfig.SHUFFLE_READ_TIMEOUT), new DeprecationDelta("mapred.skip.reduce.auto.incr.proc.count", MRJobConfig.REDUCE_SKIP_INCR_PROC_COUNT), new DeprecationDelta("mapred.skip.reduce.max.skip.groups", MRJobConfig.REDUCE_SKIP_MAXGROUPS), new DeprecationDelta("mapred.reduce.child.log.level", MRJobConfig.REDUCE_LOG_LEVEL), new 
DeprecationDelta("mapreduce.job.counters.limit", MRJobConfig.COUNTERS_MAX_KEY), new DeprecationDelta("mapred.tasktracker.indexcache.mb", MRJobConfig.SHUFFLE_INDEX_CACHE), new DeprecationDelta("mapreduce.tasktracker.indexcache.mb", MRJobConfig.SHUFFLE_INDEX_CACHE), new DeprecationDelta("jobclient.completion.poll.interval", Job.COMPLETION_POLL_INTERVAL_KEY), new DeprecationDelta("jobclient.progress.monitor.poll.interval", Job.PROGRESS_MONITOR_POLL_INTERVAL_KEY), new DeprecationDelta("jobclient.output.filter", Job.OUTPUT_FILTER), new DeprecationDelta("mapred.submit.replication", Job.SUBMIT_REPLICATION), new DeprecationDelta("mapred.used.genericoptionsparser", Job.USED_GENERIC_PARSER), new DeprecationDelta("mapred.input.dir", org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR), new DeprecationDelta("mapred.input.pathFilter.class", org.apache.hadoop.mapreduce.lib.input.FileInputFormat.PATHFILTER_CLASS), new DeprecationDelta("mapred.max.split.size", org.apache.hadoop.mapreduce.lib.input.FileInputFormat.SPLIT_MAXSIZE), new DeprecationDelta("mapred.min.split.size", org.apache.hadoop.mapreduce.lib.input.FileInputFormat.SPLIT_MINSIZE), new DeprecationDelta("mapred.output.compress", org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.COMPRESS), new DeprecationDelta("mapred.output.compression.codec", org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.COMPRESS_CODEC), new DeprecationDelta("mapred.output.compression.type", org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.COMPRESS_TYPE), new DeprecationDelta("mapred.output.dir", org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.OUTDIR), new DeprecationDelta("mapred.seqbinary.output.key.class", org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat.KEY_CLASS), new DeprecationDelta("mapred.seqbinary.output.value.class", org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat.VALUE_CLASS), new DeprecationDelta("sequencefile.filter.class", 
org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.FILTER_CLASS), new DeprecationDelta("sequencefile.filter.regex", org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.FILTER_REGEX), new DeprecationDelta("sequencefile.filter.frequency", org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter.FILTER_FREQUENCY), new DeprecationDelta("mapred.input.dir.mappers", org.apache.hadoop.mapreduce.lib.input.MultipleInputs.DIR_MAPPERS), new DeprecationDelta("mapred.input.dir.formats", org.apache.hadoop.mapreduce.lib.input.MultipleInputs.DIR_FORMATS), new DeprecationDelta("mapred.line.input.format.linespermap", org.apache.hadoop.mapreduce.lib.input.NLineInputFormat.LINES_PER_MAP), new DeprecationDelta("mapred.binary.partitioner.left.offset", org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner.LEFT_OFFSET_PROPERTY_NAME), new DeprecationDelta("mapred.binary.partitioner.right.offset", org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner.RIGHT_OFFSET_PROPERTY_NAME), new DeprecationDelta("mapred.text.key.comparator.options", org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator.COMPARATOR_OPTIONS), new DeprecationDelta("mapred.text.key.partitioner.options", org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedPartitioner.PARTITIONER_OPTIONS), new DeprecationDelta("mapred.mapper.regex.group", org.apache.hadoop.mapreduce.lib.map.RegexMapper.GROUP), new DeprecationDelta("mapred.mapper.regex", org.apache.hadoop.mapreduce.lib.map.RegexMapper.PATTERN), new DeprecationDelta("create.empty.dir.if.nonexist", org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob.CREATE_DIR), new DeprecationDelta("mapred.data.field.separator", org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper.DATA_FIELD_SEPARATOR), new DeprecationDelta("map.output.key.value.fields.spec", org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC), new DeprecationDelta("reduce.output.key.value.fields.spec", 
org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC), new DeprecationDelta("mapred.min.split.size.per.node", org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.SPLIT_MINSIZE_PERNODE), new DeprecationDelta("mapred.min.split.size.per.rack", org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.SPLIT_MINSIZE_PERRACK), new DeprecationDelta("key.value.separator.in.input.line", org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader.KEY_VALUE_SEPARATOR), new DeprecationDelta("mapred.linerecordreader.maxlength", org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH), new DeprecationDelta("mapred.lazy.output.format", org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat.OUTPUT_FORMAT), new DeprecationDelta("mapred.textoutputformat.separator", org.apache.hadoop.mapreduce.lib.output.TextOutputFormat.SEPARATOR), new DeprecationDelta("mapred.join.expr", org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat.JOIN_EXPR), new DeprecationDelta("mapred.join.keycomparator", org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat.JOIN_COMPARATOR), new DeprecationDelta("hadoop.pipes.command-file.keep", org.apache.hadoop.mapred.pipes.Submitter.PRESERVE_COMMANDFILE), new DeprecationDelta("hadoop.pipes.executable", org.apache.hadoop.mapred.pipes.Submitter.EXECUTABLE), new DeprecationDelta("hadoop.pipes.executable.interpretor", org.apache.hadoop.mapred.pipes.Submitter.INTERPRETOR), new DeprecationDelta("hadoop.pipes.java.mapper", org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_MAP), new DeprecationDelta("hadoop.pipes.java.recordreader", org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_RR), new DeprecationDelta("hadoop.pipes.java.recordwriter", org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_RW), new DeprecationDelta("hadoop.pipes.java.reducer", org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_REDUCE), new DeprecationDelta("hadoop.pipes.partitioner", 
org.apache.hadoop.mapred.pipes.Submitter.PARTITIONER), new DeprecationDelta("mapred.pipes.user.inputformat", org.apache.hadoop.mapred.pipes.Submitter.INPUT_FORMAT), new DeprecationDelta("security.task.umbilical.protocol.acl", MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_TASK_UMBILICAL), new DeprecationDelta("security.job.submission.protocol.acl", MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT), new DeprecationDelta("mapreduce.user.classpath.first", MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST), new DeprecationDelta("mapred.input.dir.recursive", FileInputFormat.INPUT_DIR_RECURSIVE) });
}
154686.8157212hadoop
/**
 * Translates the parsed streaming command-line options into a fully
 * populated {@code jobConf_}, ready for submission: input/output formats,
 * mapper/combiner/reducer (class or external command), identifier-resolver
 * driven key/value/reader/writer classes, record reader, partitioner, debug
 * scripts, the job jar, and distributed-cache entries.
 *
 * <p>NOTE(review): the order of operations below matters — e.g. the
 * idResolver re-resolutions and the getNumReduceTasks() checks depend on
 * values set earlier in this method. Do not reorder without verifying.
 *
 * @throws IOException if packaging the job jar or resolving cache URIs fails
 */
protected void setJobConf() throws IOException {
    // Legacy option: merged into the base config before jobConf_ is built
    // so it behaves like an extra -conf resource.
    if (additionalConfSpec_ != null) {
        LOG.warn("-additionalconfspec option is deprecated, please use -conf instead.");
        config_.addResource(new Path(additionalConfSpec_));
    }
    jobConf_ = new JobConf(config_, StreamJob.class);
    // Register every -input path with the job.
    for (int i = 0; i < inputSpecs_.size(); i++) {
        FileInputFormat.addInputPaths(jobConf_, (String) inputSpecs_.get(i));
    }
    String defaultPackage = this.getClass().getPackage().getName();
    Class c;
    Class fmt = null;
    // --- Input format selection -------------------------------------------
    // Default to TextInputFormat; otherwise match -inputformat against the
    // known formats by fully-qualified, canonical, or simple name. For the
    // key/value and sequence-file formats, an explicit -inputreader takes
    // precedence (fmt stays null and falls through to StreamInputFormat).
    if (inReaderSpec_ == null && inputFormatSpec_ == null) {
        fmt = TextInputFormat.class;
    } else if (inputFormatSpec_ != null) {
        if (inputFormatSpec_.equals(TextInputFormat.class.getName()) || inputFormatSpec_.equals(TextInputFormat.class.getCanonicalName()) || inputFormatSpec_.equals(TextInputFormat.class.getSimpleName())) {
            fmt = TextInputFormat.class;
        } else if (inputFormatSpec_.equals(KeyValueTextInputFormat.class.getName()) || inputFormatSpec_.equals(KeyValueTextInputFormat.class.getCanonicalName()) || inputFormatSpec_.equals(KeyValueTextInputFormat.class.getSimpleName())) {
            if (inReaderSpec_ == null) {
                fmt = KeyValueTextInputFormat.class;
            }
        } else if (inputFormatSpec_.equals(SequenceFileInputFormat.class.getName()) || inputFormatSpec_.equals(org.apache.hadoop.mapred.SequenceFileInputFormat.class.getCanonicalName()) || inputFormatSpec_.equals(org.apache.hadoop.mapred.SequenceFileInputFormat.class.getSimpleName())) {
            if (inReaderSpec_ == null) {
                fmt = SequenceFileInputFormat.class;
            }
        } else if (inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class.getName()) || inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class.getCanonicalName()) || inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class.getSimpleName())) {
            fmt = SequenceFileAsTextInputFormat.class;
        } else {
            // Unrecognized name: try to load it as a user class (resolved
            // against defaultPackage); abort the job setup if not found.
            c = StreamUtil.goodClassOrNull(jobConf_, inputFormatSpec_, defaultPackage);
            if (c != null) {
                fmt = c;
            } else {
                fail("-inputformat : class not found : " + inputFormatSpec_);
            }
        }
    }
    // Fallback when -inputreader is in use (or a known format deferred to it).
    if (fmt == null) {
        fmt = StreamInputFormat.class;
    }
    jobConf_.setInputFormat(fmt);
    // -io sets one identifier for all four streaming channels at once.
    if (ioSpec_ != null) {
        jobConf_.set("stream.map.input", ioSpec_);
        jobConf_.set("stream.map.output", ioSpec_);
        jobConf_.set("stream.reduce.input", ioSpec_);
        jobConf_.set("stream.reduce.output", ioSpec_);
    }
    // The IdentifierResolver maps channel identifiers (e.g. "text") to
    // concrete writer/reader and key/value classes. It is re-resolve()d
    // before each use below because resolve() mutates the resolver's state.
    Class<? extends IdentifierResolver> idResolverClass = jobConf_.getClass("stream.io.identifier.resolver.class", IdentifierResolver.class, IdentifierResolver.class);
    IdentifierResolver idResolver = ReflectionUtils.newInstance(idResolverClass, jobConf_);
    idResolver.resolve(jobConf_.get("stream.map.input", IdentifierResolver.TEXT_ID));
    jobConf_.setClass("stream.map.input.writer.class", idResolver.getInputWriterClass(), InputWriter.class);
    idResolver.resolve(jobConf_.get("stream.reduce.input", IdentifierResolver.TEXT_ID));
    jobConf_.setClass("stream.reduce.input.writer.class", idResolver.getInputWriterClass(), InputWriter.class);
    jobConf_.set("stream.addenvironment", addTaskEnvironment_);
    // --- Mapper: either a Java class or an external command ---------------
    boolean isMapperACommand = false;
    if (mapCmd_ != null) {
        c = StreamUtil.goodClassOrNull(jobConf_, mapCmd_, defaultPackage);
        if (c != null) {
            jobConf_.setMapperClass(c);
        } else {
            // Not loadable as a class: treat it as a shell command run by
            // PipeMapper; the command is URL-encoded into the conf.
            isMapperACommand = true;
            jobConf_.setMapperClass(PipeMapper.class);
            jobConf_.setMapRunnerClass(PipeMapRunner.class);
            jobConf_.set("stream.map.streamprocessor", URLEncoder.encode(mapCmd_, "UTF-8"));
        }
    }
    // --- Combiner: same class-or-command fallback as the mapper -----------
    if (comCmd_ != null) {
        c = StreamUtil.goodClassOrNull(jobConf_, comCmd_, defaultPackage);
        if (c != null) {
            jobConf_.setCombinerClass(c);
        } else {
            jobConf_.setCombinerClass(PipeCombiner.class);
            jobConf_.set("stream.combine.streamprocessor", URLEncoder.encode(comCmd_, "UTF-8"));
        }
    }
    if (numReduceTasksSpec_ != null) {
        int numReduceTasks = Integer.parseInt(numReduceTasksSpec_);
        jobConf_.setNumReduceTasks(numReduceTasks);
    }
    // --- Reducer ----------------------------------------------------------
    // REDUCE_NONE forces a map-only job; "aggregate" selects the built-in
    // value-aggregator classes; otherwise class-or-command fallback again.
    boolean isReducerACommand = false;
    if (redCmd_ != null) {
        if (redCmd_.equals(REDUCE_NONE)) {
            jobConf_.setNumReduceTasks(0);
        }
        if (jobConf_.getNumReduceTasks() != 0) {
            if (redCmd_.compareToIgnoreCase("aggregate") == 0) {
                jobConf_.setReducerClass(ValueAggregatorReducer.class);
                jobConf_.setCombinerClass(ValueAggregatorCombiner.class);
            } else {
                c = StreamUtil.goodClassOrNull(jobConf_, redCmd_, defaultPackage);
                if (c != null) {
                    jobConf_.setReducerClass(c);
                } else {
                    isReducerACommand = true;
                    jobConf_.setReducerClass(PipeReducer.class);
                    jobConf_.set("stream.reduce.streamprocessor", URLEncoder.encode(redCmd_, "UTF-8"));
                }
            }
        }
    }
    // --- Map-output key/value wiring --------------------------------------
    // Only override the key/value classes when the mapper is an external
    // command or the map-output identifier was set explicitly; for a
    // map-only job the map output IS the job output.
    idResolver.resolve(jobConf_.get("stream.map.output", IdentifierResolver.TEXT_ID));
    jobConf_.setClass("stream.map.output.reader.class", idResolver.getOutputReaderClass(), OutputReader.class);
    if (isMapperACommand || jobConf_.get("stream.map.output") != null) {
        jobConf_.setMapOutputKeyClass(idResolver.getOutputKeyClass());
        jobConf_.setMapOutputValueClass(idResolver.getOutputValueClass());
        if (jobConf_.getNumReduceTasks() == 0) {
            jobConf_.setOutputKeyClass(idResolver.getOutputKeyClass());
            jobConf_.setOutputValueClass(idResolver.getOutputValueClass());
        }
    }
    // --- Reduce-output key/value wiring (same rule as above) --------------
    idResolver.resolve(jobConf_.get("stream.reduce.output", IdentifierResolver.TEXT_ID));
    jobConf_.setClass("stream.reduce.output.reader.class", idResolver.getOutputReaderClass(), OutputReader.class);
    if (isReducerACommand || jobConf_.get("stream.reduce.output") != null) {
        jobConf_.setOutputKeyClass(idResolver.getOutputKeyClass());
        jobConf_.setOutputValueClass(idResolver.getOutputValueClass());
    }
    // --- Custom record reader: "-inputreader Class,k1=v1,k2=v2,..." -------
    if (inReaderSpec_ != null) {
        String[] args = inReaderSpec_.split(",");
        String readerClass = args[0];
        c = StreamUtil.goodClassOrNull(jobConf_, readerClass, defaultPackage);
        if (c != null) {
            jobConf_.set("stream.recordreader.class", c.getName());
        } else {
            fail("-inputreader: class not found: " + readerClass);
        }
        // Remaining comma-separated entries become stream.recordreader.*
        // properties; a bare key (no '=') gets an empty value.
        for (int i = 1; i < args.length; i++) {
            String[] nv = args[i].split("=", 2);
            String k = "stream.recordreader." + nv[0];
            String v = (nv.length > 1) ? nv[1] : "";
            jobConf_.set(k, v);
        }
    }
    FileOutputFormat.setOutputPath(jobConf_, new Path(output_));
    // --- Output format: user class if given, else TextOutputFormat --------
    fmt = null;
    if (outputFormatSpec_ != null) {
        c = StreamUtil.goodClassOrNull(jobConf_, outputFormatSpec_, defaultPackage);
        if (c != null) {
            fmt = c;
        } else {
            fail("-outputformat : class not found : " + outputFormatSpec_);
        }
    }
    if (fmt == null) {
        fmt = TextOutputFormat.class;
    }
    // -lazyOutput wraps the format so empty part files are not created.
    if (lazyOutput_) {
        LazyOutputFormat.setOutputFormatClass(jobConf_, fmt);
    } else {
        jobConf_.setOutputFormat(fmt);
    }
    if (partitionerSpec_ != null) {
        c = StreamUtil.goodClassOrNull(jobConf_, partitionerSpec_, defaultPackage);
        if (c != null) {
            jobConf_.setPartitionerClass(c);
        } else {
            fail("-partitioner : class not found : " + partitionerSpec_);
        }
    }
    if (mapDebugSpec_ != null) {
        jobConf_.setMapDebugScript(mapDebugSpec_);
    }
    if (reduceDebugSpec_ != null) {
        jobConf_.setReduceDebugScript(reduceDebugSpec_);
    }
    // Package user files/commands into the job jar (may be null if nothing
    // needs shipping).
    jar_ = packageJobJar();
    if (jar_ != null) {
        jobConf_.setJar(jar_);
    }
    // --- Distributed cache -------------------------------------------------
    // getURIs populates fileURIs/archiveURIs as a side effect; checkURIs
    // validates the fragment (symlink) parts before registration.
    if ((cacheArchives != null) || (cacheFiles != null)) {
        getURIs(cacheArchives, cacheFiles);
        boolean b = DistributedCache.checkURIs(fileURIs, archiveURIs);
        if (!b)
            fail(LINK_URI);
    }
    if (cacheArchives != null) {
        Job.setCacheArchives(archiveURIs, jobConf_);
    }
    if (cacheFiles != null) {
        Job.setCacheFiles(fileURIs, jobConf_);
    }
    if (verbose_) {
        listJobConfProperties();
    }
    msg("submitting to jobconf: " + getJobTrackerHostPort());
}
153496.1558237wildfly
public void configure(final DeploymentPhaseContext context, final ComponentDescription description, final ComponentConfiguration configuration) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = context.getDeploymentUnit();
    final DeploymentReflectionIndex deploymentReflectionIndex = deploymentUnit.getAttachment(REFLECTION_INDEX);
    final EEApplicationClasses applicationClasses = deploymentUnit.getAttachment(Attachments.EE_APPLICATION_CLASSES_DESCRIPTION);
    final EEModuleDescription moduleDescription = deploymentUnit.getAttachment(Attachments.EE_MODULE_DESCRIPTION);
    final Module module = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.MODULE);
    final boolean metadataComplete = MetadataCompleteMarker.isMetadataComplete(deploymentUnit);
    final Deque<InterceptorFactory> instantiators = new ArrayDeque<>();
    final Deque<InterceptorFactory> injectors = new ArrayDeque<>();
    final Deque<InterceptorFactory> uninjectors = new ArrayDeque<>();
    final Deque<InterceptorFactory> destructors = new ArrayDeque<>();
    final Map<String, List<InterceptorFactory>> userAroundInvokesByInterceptorClass = new HashMap<>();
    final Map<String, List<InterceptorFactory>> userAroundConstructsByInterceptorClass = new HashMap<String, List<InterceptorFactory>>();
    final Map<String, List<InterceptorFactory>> userAroundTimeoutsByInterceptorClass;
    final Map<String, List<InterceptorFactory>> userPrePassivatesByInterceptorClass;
    final Map<String, List<InterceptorFactory>> userPostActivatesByInterceptorClass;
    final Map<String, List<InterceptorFactory>> userPostConstructByInterceptorClass = new HashMap<String, List<InterceptorFactory>>();
    final Map<String, List<InterceptorFactory>> userPreDestroyByInterceptorClass = new HashMap<String, List<InterceptorFactory>>();
    final Set<MethodIdentifier> timeoutMethods = description.getTimerMethods();
    if (description.isTimerServiceRequired()) {
        userAroundTimeoutsByInterceptorClass = new HashMap<>();
    } else {
        userAroundTimeoutsByInterceptorClass = null;
    }
    if (description.isPassivationApplicable()) {
        userPrePassivatesByInterceptorClass = new HashMap<>();
        userPostActivatesByInterceptorClass = new HashMap<>();
    } else {
        userPrePassivatesByInterceptorClass = null;
        userPostActivatesByInterceptorClass = null;
    }
    final InterceptorFactory instantiator;
    final ComponentFactory instanceFactory = configuration.getInstanceFactory();
    if (instanceFactory != null) {
        instantiator = new ImmediateInterceptorFactory(new ComponentInstantiatorInterceptor(instanceFactory, BasicComponentInstance.INSTANCE_KEY, true));
    } else {
        final ClassReflectionIndex componentClassIndex = deploymentReflectionIndex.getClassIndex(configuration.getComponentClass());
        final Constructor<?> constructor = componentClassIndex.getConstructor(EMPTY_CLASS_ARRAY);
        if (constructor == null) {
            throw EeLogger.ROOT_LOGGER.defaultConstructorNotFound(configuration.getComponentClass());
        }
        instantiator = new ImmediateInterceptorFactory(new ComponentInstantiatorInterceptor(new ConstructorComponentFactory(constructor), BasicComponentInstance.INSTANCE_KEY, true));
    }
    final List<InterceptorDescription> interceptorWithLifecycleCallbacks = new ArrayList<InterceptorDescription>();
    if (!description.isExcludeDefaultInterceptors()) {
        interceptorWithLifecycleCallbacks.addAll(description.getDefaultInterceptors());
    }
    interceptorWithLifecycleCallbacks.addAll(description.getClassInterceptors());
    for (final InterceptorDescription interceptorDescription : description.getAllInterceptors()) {
        final String interceptorClassName = interceptorDescription.getInterceptorClassName();
        final Class<?> interceptorClass;
        try {
            interceptorClass = ClassLoadingUtils.loadClass(interceptorClassName, module);
        } catch (ClassNotFoundException e) {
            throw EeLogger.ROOT_LOGGER.cannotLoadInterceptor(e, interceptorClassName);
        }
        final InterceptorEnvironment interceptorEnvironment = moduleDescription.getInterceptorEnvironment().get(interceptorClassName);
        if (interceptorEnvironment != null) {
            description.getBindingConfigurations().addAll(interceptorEnvironment.getBindingConfigurations());
            for (final ResourceInjectionConfiguration injection : interceptorEnvironment.getResourceInjections()) {
                description.addResourceInjection(injection);
            }
        }
        final Object contextKey = interceptorClass;
        configuration.getInterceptorContextKeys().add(contextKey);
        final ClassReflectionIndex interceptorIndex = deploymentReflectionIndex.getClassIndex(interceptorClass);
        final Constructor<?> constructor = interceptorIndex.getConstructor(EMPTY_CLASS_ARRAY);
        if (constructor == null) {
            throw EeLogger.ROOT_LOGGER.defaultConstructorNotFoundOnComponent(interceptorClassName, configuration.getComponentClass());
        }
        instantiators.addFirst(new ImmediateInterceptorFactory(new ComponentInstantiatorInterceptor(new ConstructorComponentFactory(constructor), contextKey, false)));
        destructors.addLast(new ImmediateInterceptorFactory(new ManagedReferenceReleaseInterceptor(contextKey)));
        final boolean interceptorHasLifecycleCallbacks = interceptorWithLifecycleCallbacks.contains(interceptorDescription);
        new ClassDescriptionTraversal(interceptorClass, applicationClasses) {

            @Override
            public void handle(final Class<?> clazz, EEModuleClassDescription classDescription) throws DeploymentUnitProcessingException {
                // Visit one class in the interceptor's hierarchy: first fold its injection
                // bindings into the component configuration.
                mergeInjectionsForClass(clazz, interceptorClass, classDescription, moduleDescription, deploymentReflectionIndex, description, configuration, context, injectors, contextKey, uninjectors, metadataComplete);
                // Resolve the effective interceptor metadata for this class. Annotation-derived
                // metadata (classDescription) is only honoured when the deployment is not
                // metadata-complete; deployment-descriptor overrides always apply.
                final InterceptorClassDescription interceptorConfig;
                if (classDescription != null && !metadataComplete) {
                    interceptorConfig = InterceptorClassDescription.merge(classDescription.getInterceptorClassDescription(), moduleDescription.getInterceptorClassOverride(clazz.getName()));
                } else {
                    interceptorConfig = InterceptorClassDescription.merge(null, moduleDescription.getInterceptorClassOverride(clazz.getName()));
                }
                // Lifecycle callbacks (post-construct / pre-destroy / around-construct) are only
                // registered when this interceptor actually declares lifecycle callbacks and the
                // component has not opted out of lifecycle interceptors.
                if (interceptorHasLifecycleCallbacks && !description.isIgnoreLifecycleInterceptors()) {
                    final MethodIdentifier postConstructMethodIdentifier = interceptorConfig.getPostConstruct();
                    handleInterceptorClass(clazz, postConstructMethodIdentifier, userPostConstructByInterceptorClass, true, true);
                    final MethodIdentifier preDestroyMethodIdentifier = interceptorConfig.getPreDestroy();
                    handleInterceptorClass(clazz, preDestroyMethodIdentifier, userPreDestroyByInterceptorClass, true, true);
                    final MethodIdentifier aroundConstructMethodIdentifier = interceptorConfig.getAroundConstruct();
                    handleInterceptorClass(clazz, aroundConstructMethodIdentifier, userAroundConstructsByInterceptorClass, true, true);
                }
                // Around-invoke interceptors are always collected for this class.
                final MethodIdentifier aroundInvokeMethodIdentifier = interceptorConfig.getAroundInvoke();
                handleInterceptorClass(clazz, aroundInvokeMethodIdentifier, userAroundInvokesByInterceptorClass, false, false);
                // Around-timeout only matters for components that use the timer service.
                if (description.isTimerServiceRequired()) {
                    final MethodIdentifier aroundTimeoutMethodIdentifier = interceptorConfig.getAroundTimeout();
                    handleInterceptorClass(clazz, aroundTimeoutMethodIdentifier, userAroundTimeoutsByInterceptorClass, false, false);
                }
                // Passivation callbacks only apply to passivation-capable components
                // (e.g. stateful beans).
                if (description.isPassivationApplicable()) {
                    handleInterceptorClass(clazz, interceptorConfig.getPrePassivate(), userPrePassivatesByInterceptorClass, false, false);
                    handleInterceptorClass(clazz, interceptorConfig.getPostActivate(), userPostActivatesByInterceptorClass, false, false);
                }
            }

            /**
             * Resolves the method named by {@code methodIdentifier} on {@code clazz} and, if it is
             * not overridden further down the interceptor class hierarchy, records an interceptor
             * factory for it in {@code classMap} under the interceptor's class name.
             *
             * @param clazz            the class currently being traversed
             * @param methodIdentifier identifier of the callback method; a {@code null} value means
             *                         this class declares no such callback and the call is a no-op
             * @param classMap         interceptor-class-name -> factories accumulator to append to
             * @param changeMethod     passed through to the lifecycle method interceptor
             * @param lifecycleMethod  passed through to the lifecycle method interceptor
             * @throws DeploymentUnitProcessingException if the method cannot be resolved via the
             *         deployment reflection index
             */
            private void handleInterceptorClass(final Class<?> clazz, final MethodIdentifier methodIdentifier, final Map<String, List<InterceptorFactory>> classMap, final boolean changeMethod, final boolean lifecycleMethod) throws DeploymentUnitProcessingException {
                if (methodIdentifier != null) {
                    final Method method = ClassReflectionIndexUtil.findRequiredMethod(deploymentReflectionIndex, clazz, methodIdentifier);
                    // Skip methods overridden by a subclass; the override (visited later in the
                    // traversal) is the one that must run.
                    if (isNotOverriden(clazz, method, interceptorClass, deploymentReflectionIndex)) {
                        final InterceptorFactory interceptorFactory = new ImmediateInterceptorFactory(new ManagedReferenceLifecycleMethodInterceptor(contextKey, method, changeMethod, lifecycleMethod));
                        // Lazily create the per-interceptor-class factory list on first use.
                        List<InterceptorFactory> factories = classMap.get(interceptorClassName);
                        if (factories == null) {
                            classMap.put(interceptorClassName, factories = new ArrayList<InterceptorFactory>());
                        }
                        factories.add(interceptorFactory);
                    }
                }
            }
        }.run();
    }
    final List<InterceptorFactory> userAroundConstruct = new ArrayList<InterceptorFactory>();
    final List<InterceptorFactory> userPostConstruct = new ArrayList<InterceptorFactory>();
    final List<InterceptorFactory> userPreDestroy = new ArrayList<InterceptorFactory>();
    final List<InterceptorFactory> userPrePassivate = new ArrayList<InterceptorFactory>();
    final List<InterceptorFactory> userPostActivate = new ArrayList<InterceptorFactory>();
    for (final InterceptorDescription interceptorClass : interceptorWithLifecycleCallbacks) {
        if (userPostConstructByInterceptorClass.containsKey(interceptorClass.getInterceptorClassName())) {
            userPostConstruct.addAll(userPostConstructByInterceptorClass.get(interceptorClass.getInterceptorClassName()));
        }
        if (userAroundConstructsByInterceptorClass.containsKey(interceptorClass.getInterceptorClassName())) {
            userAroundConstruct.addAll(userAroundConstructsByInterceptorClass.get(interceptorClass.getInterceptorClassName()));
        }
        if (userPreDestroyByInterceptorClass.containsKey(interceptorClass.getInterceptorClassName())) {
            userPreDestroy.addAll(userPreDestroyByInterceptorClass.get(interceptorClass.getInterceptorClassName()));
        }
        if (description.isPassivationApplicable()) {
            if (userPrePassivatesByInterceptorClass.containsKey(interceptorClass.getInterceptorClassName())) {
                userPrePassivate.addAll(userPrePassivatesByInterceptorClass.get(interceptorClass.getInterceptorClassName()));
            }
            if (userPostActivatesByInterceptorClass.containsKey(interceptorClass.getInterceptorClassName())) {
                userPostActivate.addAll(userPostActivatesByInterceptorClass.get(interceptorClass.getInterceptorClassName()));
            }
        }
    }
    if (!injectors.isEmpty()) {
        configuration.addPostConstructInterceptors(new ArrayList<>(injectors), InterceptorOrder.ComponentPostConstruct.INTERCEPTOR_RESOURCE_INJECTION_INTERCEPTORS);
    }
    if (!instantiators.isEmpty()) {
        configuration.addPostConstructInterceptors(new ArrayList<>(instantiators), InterceptorOrder.ComponentPostConstruct.INTERCEPTOR_INSTANTIATION_INTERCEPTORS);
    }
    if (!userAroundConstruct.isEmpty()) {
        configuration.addAroundConstructInterceptors(userAroundConstruct, InterceptorOrder.AroundConstruct.INTERCEPTOR_AROUND_CONSTRUCT);
    }
    configuration.addAroundConstructInterceptor(instantiator, InterceptorOrder.AroundConstruct.CONSTRUCT_COMPONENT);
    configuration.addAroundConstructInterceptor(new ImmediateInterceptorFactory(Interceptors.getTerminalInterceptor()), InterceptorOrder.AroundConstruct.TERMINAL_INTERCEPTOR);
    if (!configuration.getAroundConstructInterceptors().isEmpty()) {
        configuration.addPostConstructInterceptor(new AroundConstructInterceptorFactory(Interceptors.getChainedInterceptorFactory(configuration.getAroundConstructInterceptors())), InterceptorOrder.ComponentPostConstruct.AROUND_CONSTRUCT_CHAIN);
    }
    if (!userPostConstruct.isEmpty()) {
        configuration.addPostConstructInterceptors(userPostConstruct, InterceptorOrder.ComponentPostConstruct.INTERCEPTOR_USER_INTERCEPTORS);
    }
    if (!uninjectors.isEmpty()) {
        configuration.addPreDestroyInterceptors(new ArrayList<>(uninjectors), InterceptorOrder.ComponentPreDestroy.INTERCEPTOR_UNINJECTION_INTERCEPTORS);
    }
    if (!destructors.isEmpty()) {
        configuration.addPreDestroyInterceptors(new ArrayList<>(destructors), InterceptorOrder.ComponentPreDestroy.INTERCEPTOR_DESTRUCTION_INTERCEPTORS);
    }
    if (!userPreDestroy.isEmpty()) {
        configuration.addPreDestroyInterceptors(userPreDestroy, InterceptorOrder.ComponentPreDestroy.INTERCEPTOR_USER_INTERCEPTORS);
    }
    if (description.isPassivationApplicable()) {
        if (!userPrePassivate.isEmpty()) {
            configuration.addPrePassivateInterceptors(userPrePassivate, InterceptorOrder.ComponentPassivation.INTERCEPTOR_USER_INTERCEPTORS);
        }
        if (!userPostActivate.isEmpty()) {
            configuration.addPostActivateInterceptors(userPostActivate, InterceptorOrder.ComponentPassivation.INTERCEPTOR_USER_INTERCEPTORS);
        }
    }
    final List<InterceptorDescription> classInterceptors = description.getClassInterceptors();
    final Map<MethodIdentifier, List<InterceptorDescription>> methodInterceptors = description.getMethodInterceptors();
    if (description.isIntercepted()) {
        for (final Method method : configuration.getDefinedComponentMethods()) {
            final MethodIdentifier identifier = MethodIdentifier.getIdentifier(method.getReturnType(), method.getName(), method.getParameterTypes());
            final List<InterceptorFactory> userAroundInvokes = new ArrayList<InterceptorFactory>();
            final List<InterceptorFactory> userAroundTimeouts = new ArrayList<InterceptorFactory>();
            final boolean requiresTimerChain = description.isTimerServiceRequired() && timeoutMethods.contains(identifier);
            if (!description.isExcludeDefaultInterceptors() && !description.isExcludeDefaultInterceptors(identifier)) {
                for (InterceptorDescription interceptorDescription : description.getDefaultInterceptors()) {
                    String interceptorClassName = interceptorDescription.getInterceptorClassName();
                    List<InterceptorFactory> aroundInvokes = userAroundInvokesByInterceptorClass.get(interceptorClassName);
                    if (aroundInvokes != null) {
                        userAroundInvokes.addAll(aroundInvokes);
                    }
                    if (requiresTimerChain) {
                        List<InterceptorFactory> aroundTimeouts = userAroundTimeoutsByInterceptorClass.get(interceptorClassName);
                        if (aroundTimeouts != null) {
                            userAroundTimeouts.addAll(aroundTimeouts);
                        }
                    }
                }
            }
            if (!description.isExcludeClassInterceptors(identifier)) {
                for (InterceptorDescription interceptorDescription : classInterceptors) {
                    String interceptorClassName = interceptorDescription.getInterceptorClassName();
                    List<InterceptorFactory> aroundInvokes = userAroundInvokesByInterceptorClass.get(interceptorClassName);
                    if (aroundInvokes != null) {
                        userAroundInvokes.addAll(aroundInvokes);
                    }
                    if (requiresTimerChain) {
                        List<InterceptorFactory> aroundTimeouts = userAroundTimeoutsByInterceptorClass.get(interceptorClassName);
                        if (aroundTimeouts != null) {
                            userAroundTimeouts.addAll(aroundTimeouts);
                        }
                    }
                }
            }
            List<InterceptorDescription> methodLevelInterceptors = methodInterceptors.get(identifier);
            if (methodLevelInterceptors != null) {
                for (InterceptorDescription methodLevelInterceptor : methodLevelInterceptors) {
                    String interceptorClassName = methodLevelInterceptor.getInterceptorClassName();
                    List<InterceptorFactory> aroundInvokes = userAroundInvokesByInterceptorClass.get(interceptorClassName);
                    if (aroundInvokes != null) {
                        userAroundInvokes.addAll(aroundInvokes);
                    }
                    if (requiresTimerChain) {
                        List<InterceptorFactory> aroundTimeouts = userAroundTimeoutsByInterceptorClass.get(interceptorClassName);
                        if (aroundTimeouts != null) {
                            userAroundTimeouts.addAll(aroundTimeouts);
                        }
                    }
                }
            }
            if (requiresTimerChain) {
                configuration.addComponentInterceptor(method, new UserInterceptorFactory(weaved(userAroundInvokes), weaved(userAroundTimeouts)), InterceptorOrder.Component.INTERCEPTOR_USER_INTERCEPTORS);
            } else {
                configuration.addComponentInterceptors(method, userAroundInvokes, InterceptorOrder.Component.INTERCEPTOR_USER_INTERCEPTORS);
            }
        }
    }
}
165319.9438241cassandra
/**
 * Dispatches an incoming repair message to the handler for its verb.
 * <p>
 * Prepare/snapshot/validation/sync/cleanup verbs are handled inline here; the
 * consistent-repair coordination verbs are delegated to
 * {@code ctx.repair().consistent}. Any exception escaping a handler fails the
 * participate state (if one exists), removes the parent repair session, and is
 * rethrown wrapped in a {@link RuntimeException}.
 *
 * @param message the incoming repair message; {@code message.payload.desc} may be
 *                null for verbs that do not carry a job descriptor
 */
public void doVerb(final Message<RepairMessage> message) {
    RepairJobDesc desc = message.payload.desc;
    try {
        switch(message.verb()) {
            case PREPARE_MSG:
                {
                    PrepareMessage prepareMessage = (PrepareMessage) message.payload;
                    logger.debug("Preparing, {}", prepareMessage);
                    ParticipateState state = new ParticipateState(ctx.clock(), message.from(), prepareMessage);
                    // A failed register means this prepare was already seen; reply
                    // based on the previously registered state instead of redoing work.
                    if (!ctx.repair().register(state)) {
                        replyDedup(ctx.repair().participate(state.id), message);
                        return;
                    }
                    if (!ctx.repair().verifyCompactionsPendingThreshold(prepareMessage.parentRepairSession, prepareMessage.previewKind)) {
                        state.phase.fail("Too many pending compactions");
                        sendFailureResponse(message);
                        return;
                    }
                    // Resolve every table up front; any table dropped since the
                    // coordinator built the request fails the whole prepare.
                    List<ColumnFamilyStore> columnFamilyStores = new ArrayList<>(prepareMessage.tableIds.size());
                    for (TableId tableId : prepareMessage.tableIds) {
                        ColumnFamilyStore columnFamilyStore = ColumnFamilyStore.getIfExists(tableId);
                        if (columnFamilyStore == null) {
                            String reason = String.format("Table with id %s was dropped during prepare phase of repair", tableId);
                            state.phase.fail(reason);
                            logErrorAndSendFailureResponse(reason, message);
                            return;
                        }
                        columnFamilyStores.add(columnFamilyStore);
                    }
                    state.phase.accept();
                    ctx.repair().registerParentRepairSession(prepareMessage.parentRepairSession, message.from(), columnFamilyStores, prepareMessage.ranges, prepareMessage.isIncremental, prepareMessage.repairedAt, prepareMessage.isGlobal, prepareMessage.previewKind);
                    sendAck(message);
                }
                break;
            case SNAPSHOT_MSG:
                {
                    logger.debug("Snapshotting {}", desc);
                    ParticipateState state = ctx.repair().participate(desc.parentSessionId);
                    if (state == null) {
                        logErrorAndSendFailureResponse("Unknown repair " + desc.parentSessionId, message);
                        return;
                    }
                    final ColumnFamilyStore cfs = ColumnFamilyStore.getIfExists(desc.keyspace, desc.columnFamily);
                    if (cfs == null) {
                        String reason = String.format("Table %s.%s was dropped during snapshot phase of repair %s", desc.keyspace, desc.columnFamily, desc.parentSessionId);
                        state.phase.fail(reason);
                        logErrorAndSendFailureResponse(reason, message);
                        return;
                    }
                    ActiveRepairService.ParentRepairSession prs = ctx.repair().getParentRepairSession(desc.parentSessionId);
                    // Only snapshot once per session; setHasSnapshots() guards against
                    // duplicate snapshot requests.
                    if (prs.setHasSnapshots()) {
                        state.getOrCreateJob(desc).snapshot();
                        TableRepairManager repairManager = cfs.getRepairManager();
                        if (prs.isGlobal) {
                            repairManager.snapshot(desc.parentSessionId.toString(), prs.getRanges(), false);
                        } else {
                            repairManager.snapshot(desc.parentSessionId.toString(), desc.ranges, true);
                        }
                        logger.debug("Enqueuing response to snapshot request {} to {}", desc.sessionId, message.from());
                    }
                    sendAck(message);
                }
                break;
            case VALIDATION_REQ:
                {
                    ValidationRequest validationRequest = (ValidationRequest) message.payload;
                    logger.debug("Validating {}", validationRequest);
                    ParticipateState participate = ctx.repair().participate(desc.parentSessionId);
                    if (participate == null) {
                        logErrorAndSendFailureResponse("Unknown repair " + desc.parentSessionId, message);
                        return;
                    }
                    ValidationState vState = new ValidationState(ctx.clock(), desc, message.from());
                    if (!register(message, participate, vState, participate::register, (d, i) -> participate.validation(d)))
                        return;
                    try {
                        ColumnFamilyStore store = ColumnFamilyStore.getIfExists(desc.keyspace, desc.columnFamily);
                        if (store == null) {
                            String msg = String.format("Table %s.%s was dropped during validation phase of repair %s", desc.keyspace, desc.columnFamily, desc.parentSessionId);
                            vState.phase.fail(msg);
                            logErrorAndSendFailureResponse(msg, message);
                            return;
                        }
                        try {
                            ctx.repair().consistent.local.maybeSetRepairing(desc.parentSessionId);
                        } catch (Throwable t) {
                            JVMStabilityInspector.inspectThrowable(t);
                            vState.phase.fail(t.toString());
                            logErrorAndSendFailureResponse(t.toString(), message);
                            return;
                        }
                        PreviewKind previewKind;
                        try {
                            previewKind = previewKind(desc.parentSessionId);
                        } catch (NoSuchRepairSessionException e) {
                            logger.warn("Parent repair session {} has been removed, failing repair", desc.parentSessionId);
                            vState.phase.fail(e);
                            sendFailureResponse(message);
                            return;
                        }
                        // Reject validation requests for ranges this node does not own.
                        if (!acceptMessage(validationRequest, ctx.broadcastAddressAndPort(), message.from())) {
                            RepairOutOfTokenRangeException e = new RepairOutOfTokenRangeException(validationRequest.desc.ranges);
                            logger.error("Got out-of-range repair request from " + message.from() + ": " + validationRequest.desc.ranges, e);
                            vState.phase.fail(e);
                            sendFailureResponse(message);
                            return;
                        }
                        vState.phase.accept();
                        sendAck(message);
                        // Validation itself runs asynchronously; the ack above only
                        // confirms the request was accepted.
                        Validator validator = new Validator(ctx, vState, validationRequest.nowInSec, isIncremental(desc.parentSessionId), previewKind);
                        ctx.validationManager().submitValidation(store, validator);
                    } catch (Throwable t) {
                        vState.phase.fail(t);
                        throw t;
                    }
                }
                break;
            case SYNC_REQ:
                {
                    SyncRequest request = (SyncRequest) message.payload;
                    logger.debug("Syncing {}", request);
                    ParticipateState participate = ctx.repair().participate(desc.parentSessionId);
                    if (participate == null) {
                        logErrorAndSendFailureResponse("Unknown repair " + desc.parentSessionId, message);
                        return;
                    }
                    SyncState state = new SyncState(ctx.clock(), desc, request.initiator, request.src, request.dst);
                    if (!register(message, participate, state, participate::register, participate::sync))
                        return;
                    state.phase.accept();
                    StreamingRepairTask task = new StreamingRepairTask(ctx, state, desc, request.initiator, request.src, request.dst, request.ranges, isIncremental(desc.parentSessionId) ? desc.parentSessionId : null, request.previewKind, request.asymmetric);
                    task.run();
                    sendAck(message);
                }
                break;
            case CLEANUP_MSG:
                {
                    logger.debug("cleaning up repair");
                    CleanupMessage cleanup = (CleanupMessage) message.payload;
                    ParticipateState state = ctx.repair().participate(cleanup.parentRepairSession);
                    if (state != null)
                        state.phase.success("Cleanup message received");
                    ctx.repair().removeParentRepairSession(cleanup.parentRepairSession);
                    sendAck(message);
                }
                break;
            // Consistent (incremental) repair coordination verbs are delegated.
            case PREPARE_CONSISTENT_REQ:
                ctx.repair().consistent.local.handlePrepareMessage(message);
                break;
            case PREPARE_CONSISTENT_RSP:
                ctx.repair().consistent.coordinated.handlePrepareResponse(message);
                break;
            case FINALIZE_PROPOSE_MSG:
                ctx.repair().consistent.local.handleFinalizeProposeMessage(message);
                break;
            case FINALIZE_PROMISE_MSG:
                ctx.repair().consistent.coordinated.handleFinalizePromiseMessage(message);
                break;
            case FINALIZE_COMMIT_MSG:
                ctx.repair().consistent.local.handleFinalizeCommitMessage(message);
                break;
            case FAILED_SESSION_MSG:
                FailSession failure = (FailSession) message.payload;
                sendAck(message);
                ParticipateState p = ctx.repair().participate(failure.sessionID);
                if (p != null)
                    p.phase.fail("Failure message from " + message.from());
                ctx.repair().consistent.coordinated.handleFailSessionMessage(failure);
                ctx.repair().consistent.local.handleFailSessionMessage(message.from(), failure);
                break;
            case STATUS_REQ:
                ctx.repair().consistent.local.handleStatusRequest(message.from(), (StatusRequest) message.payload);
                break;
            case STATUS_RSP:
                ctx.repair().consistent.local.handleStatusResponse(message.from(), (StatusResponse) message.payload);
                break;
            default:
                ctx.repair().handleMessage(message);
                break;
        }
    } catch (Exception e) {
        // Include the exception in the log so the failure cause is visible even if
        // the rethrown RuntimeException is swallowed upstream.
        logger.error("Got error, removing parent repair session", e);
        if (desc != null && desc.parentSessionId != null) {
            ParticipateState participate = ctx.repair().participate(desc.parentSessionId);
            if (participate != null)
                participate.phase.fail(e);
            ctx.repair().removeParentRepairSession(desc.parentSessionId);
        }
        throw new RuntimeException(e);
    }
}
1610721.381348cassandra
public void testGroupByWithoutPaging() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))" + compactOption);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 2, 3, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 4, 3, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (3, 3, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (4, 8, 2, 12, 24)");
    execute("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2 AND d = 12");
    execute("DELETE FROM %s WHERE a = 3");
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a"), row(1, 2, 6, 4L, 24), row(2, 2, 6, 2L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b"), row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b = 2 GROUP BY a, b ALLOW FILTERING"), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6));
    assertEmpty(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b IN () GROUP BY a, b ALLOW FILTERING"));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b"), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b LIMIT 2"), row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"), row(1, 2, 6, 4L, 24), row(2, 2, 6, 2L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"), row(1, 2, 6, 4L, 24), row(2, 2, 6, 2L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c LIMIT 3"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b LIMIT 3"), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c LIMIT 3"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b LIMIT 3"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3), row(2, 2, 3, 3), row(4, 8, 2, 12));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3, 6), row(2, 2, 3, 3, 6), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(2, 2, 3, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6));
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a"), row(1, 1L), row(2, 1L), row(4, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b");
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a LIMIT 2"), row(1, 1L), row(2, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b LIMIT 2");
    assertInvalidMessage("ORDER BY is only supported when the partition key is restricted by an EQ or an IN", "SELECT a, b, c, count(b), max(e) FROM %s GROUP BY a, b ORDER BY b DESC, c DESC");
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b"), row(1, 2, 1, 3), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 and token(a) = token(1) GROUP BY b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12));
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a"), row(1, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a, b");
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 10"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 1"), row(1L, 6));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 10"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 1"), row(1L, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 2"), row(1, 2, 1, 3), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"), row(1, 2, 1, 3));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"), row(1, 2, 1, 3), row(1, 2, 2, 6));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"), row(1, 2, 1, 3, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 2"), row(1, 2, 1, 3), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3), row(1, 2, 2, 6));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3, 6));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC"), row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12), row(1, 2, 6, 1L, 6));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC PER PARTITION LIMIT 1"), row(1, 4, 24, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 2"), row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) AND b = 2 GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(2, 2, 6, 1L, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a"), row(1, 1L), row(2, 1L), row(4, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b");
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a LIMIT 2"), row(1, 1L), row(2, 1L));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 1"), row(1, 2, 6, 1L, 6), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3, 6), row(2, 2, 3, 3, 6), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT a, b, c, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC"), row(4, 8, 2, 1L, 24), row(2, 4, 3, 1L, 12), row(1, 4, 2, 2L, 24), row(2, 2, 3, 1L, 6), row(1, 2, 2, 2L, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC"), row(4, 8, 2, 12), row(2, 4, 3, 6), row(1, 4, 2, 12), row(2, 2, 3, 3), row(1, 2, 2, 6), row(1, 2, 1, 3));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 3"), row(4, 8, 2, 12), row(2, 4, 3, 6), row(1, 4, 2, 12));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 3"), row(4, 8, 2, 12, 24), row(2, 4, 3, 6, 12), row(1, 4, 2, 12, 24));
    assertInvalidMessage("Group by is currently only supported on the columns of the PRIMARY KEY, got e", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, e");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY c");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, c, b");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, a");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, c, d FROM %s WHERE token(a) = token(1) GROUP BY b, c");
    assertInvalidMessage("Undefined column name clustering1", "SELECT a, b as clustering1, max(c) FROM %s WHERE a = 1 GROUP BY a, clustering1");
    assertInvalidMessage("Undefined column name z", "SELECT a, b, max(c) FROM %s WHERE a = 1 GROUP BY a, b, z");
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key ((a, b), c, d))" + compactOption);
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 1, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 3, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
    assertInvalidMessage("Group by is not supported on only a part of the partition key", "SELECT a, b, max(d) FROM %s GROUP BY a");
    assertRows(execute("SELECT a, b, max(d) FROM %s GROUP BY a, b"), row(1, 2, 12), row(1, 1, 12));
    assertRows(execute("SELECT a, b, max(d) FROM %s WHERE a = 1 AND b = 1 GROUP BY b"), row(1, 1, 12));
    createTable("CREATE TABLE %s (a int primary key, b int, c int)" + compactOption);
    execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 6)");
    execute("INSERT INTO %s (a, b, c) VALUES (2, 6, 12)");
    execute("INSERT INTO %s (a, b, c) VALUES (3, 12, 24)");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, max(c) FROM %s WHERE a = 1 GROUP BY a, a");
}
1610646.421347cassandra
// Exercises CQL GROUP BY semantics without paging: grouping on partition/clustering
// prefixes, interaction with LIMIT / PER PARTITION LIMIT, DISTINCT, ORDER BY, and the
// validation errors for unsupported groupings. Relies on CQLTester helpers
// (createTable/execute/assertRows/assertInvalidMessage), so statement order matters.
public void testGroupByWithoutPaging() throws Throwable {
    // Table 1: single-column partition key `a`, clustering columns (b, c, d).
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 2, 3, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 4, 3, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (3, 3, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (4, 8, 2, 12, 24)");
    // Deletions ensure groups correctly skip row tombstones (row a=1,b=3) and a
    // fully deleted partition (a=3).
    execute("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2 AND d = 12");
    execute("DELETE FROM %s WHERE a = 3");
    // --- Range queries (no partition restriction): group on full partition key or prefix.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a"), row(1, 2, 6, 4L, 24), row(2, 2, 6, 2L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b"), row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
    // GROUP BY combined with ALLOW FILTERING on a clustering restriction.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b = 2 GROUP BY a, b ALLOW FILTERING"), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6));
    // An empty IN list produces no groups at all.
    assertEmpty(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE b IN () GROUP BY a, b ALLOW FILTERING"));
    // Without aggregates, GROUP BY returns the first row of each group.
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b"), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    // --- LIMIT caps the number of groups; PER PARTITION LIMIT caps groups per partition.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b LIMIT 2"), row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24));
    // PER PARTITION LIMIT larger than the single group per partition has no effect here.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"), row(1, 2, 6, 4L, 24), row(2, 2, 6, 2L, 12), row(4, 8, 24, 1L, 24));
    // Both limits together: PER PARTITION LIMIT applies first, then the global LIMIT.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6));
    // NOTE(review): duplicate of the PER PARTITION LIMIT 2 assertion above — presumably
    // intentional repetition; confirm against upstream history before removing.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a PER PARTITION LIMIT 2"), row(1, 2, 6, 4L, 24), row(2, 2, 6, 2L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c LIMIT 3"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b LIMIT 3"), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c LIMIT 3"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b LIMIT 3"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3), row(2, 2, 3, 3), row(4, 8, 2, 12));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3, 6), row(2, 2, 3, 3, 6), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(2, 2, 3, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6));
    // --- DISTINCT: allowed only when grouping on the partition key alone.
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a"), row(1, 1L), row(2, 1L), row(4, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b");
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s GROUP BY a LIMIT 2"), row(1, 1L), row(2, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s GROUP BY a, b LIMIT 2");
    // ORDER BY is rejected on range (unrestricted-partition) queries.
    assertInvalidMessage("ORDER BY is only supported when the partition key is restricted by an EQ or an IN", "SELECT a, b, c, count(b), max(e) FROM %s GROUP BY a, b ORDER BY b DESC, c DESC");
    // --- Single-partition queries (a = 1): the partition key column may be omitted
    // from the GROUP BY clause since it is fixed by the restriction.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    // Restricted clustering columns may likewise be omitted from GROUP BY.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b"), row(1, 2, 1, 3), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    // A redundant token() restriction alongside EQ still permits omitting `a`.
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 and token(a) = token(1) GROUP BY b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12));
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a"), row(1, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a, b");
    // --- LIMIT / PER PARTITION LIMIT within a single partition.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 10"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 1"), row(1L, 6));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 10"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
    assertRows(execute("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 1"), row(1L, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 2"), row(1, 2, 1, 3), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"), row(1, 2, 1, 3));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"), row(1, 2, 1, 3), row(1, 2, 2, 6));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1"), row(1, 2, 1, 3, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 2"), row(1, 2, 1, 3), row(1, 4, 2, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3), row(1, 2, 2, 6));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3, 6));
    // --- ORDER BY DESC inside one partition: groups are returned in reversed
    // clustering order, and the per-group "first row" follows that order.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC"), row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12), row(1, 2, 6, 1L, 6));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC PER PARTITION LIMIT 1"), row(1, 4, 24, 2L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 2"), row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12));
    // --- Multi-partition queries via IN on the partition key.
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) AND b = 2 GROUP BY a, b, c"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(2, 2, 6, 1L, 6));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b"), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a"), row(1, 1L), row(2, 1L), row(4, 1L));
    assertInvalidMessage("Grouping on clustering columns is not allowed for SELECT DISTINCT queries", "SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b");
    assertRows(execute("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a LIMIT 2"), row(1, 1L), row(2, 1L));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 1"), row(1, 2, 6, 1L, 6), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2"), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 3, 6), row(2, 2, 3, 3, 6), row(4, 8, 2, 12, 24));
    // ORDER BY over an IN of partitions: results merged in reversed clustering order.
    assertRows(execute("SELECT a, b, c, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC"), row(4, 8, 2, 1L, 24), row(2, 4, 3, 1L, 12), row(1, 4, 2, 2L, 24), row(2, 2, 3, 1L, 6), row(1, 2, 2, 2L, 12));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC"), row(4, 8, 2, 12), row(2, 4, 3, 6), row(1, 4, 2, 12), row(2, 2, 3, 3), row(1, 2, 2, 6), row(1, 2, 1, 3));
    assertRows(execute("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 3"), row(4, 8, 2, 12), row(2, 4, 3, 6), row(1, 4, 2, 12));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 3"), row(4, 8, 2, 12, 24), row(2, 4, 3, 6, 12), row(1, 4, 2, 12, 24));
    // --- Invalid GROUP BY clauses: non-PK columns, wrong column order, duplicates,
    // skipping the partition key under a token() restriction, and unknown names.
    assertInvalidMessage("Group by is currently only supported on the columns of the PRIMARY KEY, got e", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, e");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY c");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, c, b");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, d, count(b), max(c) FROM %s WHERE a = 1 GROUP BY a, a");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, b, c, d FROM %s WHERE token(a) = token(1) GROUP BY b, c");
    // Aliases cannot be used as GROUP BY targets.
    assertInvalidMessage("Undefined column name clustering1", "SELECT a, b as clustering1, max(c) FROM %s WHERE a = 1 GROUP BY a, clustering1");
    assertInvalidMessage("Undefined column name z", "SELECT a, b, max(c) FROM %s WHERE a = 1 GROUP BY a, b, z");
    // Table 2: composite partition key (a, b) — grouping on a strict prefix of the
    // partition key is rejected; the full key (or restricted remainder) is required.
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key ((a, b), c, d))");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 1, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 1, 3, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
    assertInvalidMessage("Group by is not supported on only a part of the partition key", "SELECT a, b, max(d) FROM %s GROUP BY a");
    assertRows(execute("SELECT a, b, max(d) FROM %s GROUP BY a, b"), row(1, 2, 12), row(1, 1, 12));
    assertRows(execute("SELECT a, b, max(d) FROM %s WHERE a = 1 AND b = 1 GROUP BY b"), row(1, 1, 12));
    // Table 3: simple primary key — a duplicated partition key column in GROUP BY
    // is still an ordering violation.
    createTable("CREATE TABLE %s (a int primary key, b int, c int)");
    execute("INSERT INTO %s (a, b, c) VALUES (1, 3, 6)");
    execute("INSERT INTO %s (a, b, c) VALUES (2, 6, 12)");
    execute("INSERT INTO %s (a, b, c) VALUES (3, 12, 24)");
    assertInvalidMessage("Group by currently only support groups of columns following their declared order in the PRIMARY KEY", "SELECT a, max(c) FROM %s WHERE a = 1 GROUP BY a, a");
}
1612261.887296cassandra
public void testAllTypes() throws Throwable {
    String myType = createType("CREATE TYPE %s (a int, b uuid, c set<text>)");
    createTable("CREATE TABLE %s (" + "k int PRIMARY KEY, " + "asciival ascii, " + "bigintval bigint, " + "blobval blob, " + "booleanval boolean, " + "dateval date, " + "decimalval decimal, " + "doubleval double, " + "floatval float, " + "inetval inet, " + "intval int, " + "textval text, " + "timeval time, " + "timestampval timestamp, " + "timeuuidval timeuuid, " + "uuidval uuid," + "varcharval varchar, " + "varintval varint, " + "listval list<int>, " + "frozenlistval frozen<list<int>>, " + "setval set<uuid>, " + "frozensetval frozen<set<uuid>>, " + "mapval map<ascii, int>," + "frozenmapval frozen<map<ascii, int>>," + "tupleval frozen<tuple<int, ascii, uuid>>," + "udtval frozen<" + myType + ">)");
    TableMetadata metadata = currentTableMetadata();
    for (ColumnMetadata def : new HashSet<>(metadata.columns())) {
        try {
            createView("mv_" + def.name, "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL PRIMARY KEY (" + def.name + ",k)");
            if (def.type.isMultiCell())
                Assert.fail("MV on a multicell should fail " + def);
            if (def.isPartitionKey())
                Assert.fail("MV on partition key should fail " + def);
        } catch (Exception e) {
            if (!def.type.isMultiCell() && !def.isPartitionKey())
                Assert.fail("MV creation failed on " + def);
        }
    }
    assertInvalidMessage("from_json() cannot be used in the selection clause", "SELECT from_json(asciival) FROM %s", 0, 0);
    String func1 = createFunction(KEYSPACE, "int", "CREATE FUNCTION %s (a int) CALLED ON NULL INPUT RETURNS text LANGUAGE java AS $$ return a.toString(); $$");
    createFunctionOverload(func1, "int", "CREATE FUNCTION %s (a text) CALLED ON NULL INPUT RETURNS text LANGUAGE java AS $$ return new String(a); $$");
    updateView("INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\"ascii text\"");
    assertRows(execute("SELECT k, asciival FROM %s WHERE k = ?", 0), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\"ascii \\\" text\"");
    assertRows(execute("SELECT k, asciival FROM %s WHERE k = ?", 0), row(0, "ascii \" text"));
    assertRows(execute("SELECT asciival FROM %s WHERE k = from_json(?)", "0"), row("ascii \" text"));
    assertRows(execute("SELECT k, udtval from mv_asciival WHERE asciival = ?", "ascii text"));
    assertRows(execute("SELECT k, udtval from mv_asciival WHERE asciival = ?", "ascii \" text"), row(0, null));
    updateView("UPDATE %s SET asciival = from_json(?) WHERE k = from_json(?)", "\"ascii \\\" text\"", "0");
    assertRows(execute("SELECT k, udtval from mv_asciival WHERE asciival = ?", "ascii \" text"), row(0, null));
    updateView("DELETE FROM %s WHERE k = from_json(?)", "0");
    assertRows(execute("SELECT k, asciival FROM %s WHERE k = ?", 0));
    assertRows(execute("SELECT k, udtval from mv_asciival WHERE asciival = ?", "ascii \" text"));
    updateView("INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\"ascii text\"");
    assertRows(execute("SELECT k, udtval from mv_asciival WHERE asciival = ?", "ascii text"), row(0, null));
    updateView("INSERT INTO %s (k, bigintval) VALUES (?, from_json(?))", 0, "123123123123");
    assertRows(execute("SELECT k, bigintval FROM %s WHERE k = ?", 0), row(0, 123123123123L));
    assertRows(execute("SELECT k, asciival from mv_bigintval WHERE bigintval = ?", 123123123123L), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, blobval) VALUES (?, from_json(?))", 0, "\"0x00000001\"");
    assertRows(execute("SELECT k, blobval FROM %s WHERE k = ?", 0), row(0, ByteBufferUtil.bytes(1)));
    assertRows(execute("SELECT k, asciival from mv_blobval WHERE blobval = ?", ByteBufferUtil.bytes(1)), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, booleanval) VALUES (?, from_json(?))", 0, "true");
    assertRows(execute("SELECT k, booleanval FROM %s WHERE k = ?", 0), row(0, true));
    assertRows(execute("SELECT k, asciival from mv_booleanval WHERE booleanval = ?", true), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, booleanval) VALUES (?, from_json(?))", 0, "false");
    assertRows(execute("SELECT k, booleanval FROM %s WHERE k = ?", 0), row(0, false));
    assertRows(execute("SELECT k, asciival from mv_booleanval WHERE booleanval = ?", true));
    assertRows(execute("SELECT k, asciival from mv_booleanval WHERE booleanval = ?", false), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, dateval) VALUES (?, from_json(?))", 0, "\"1987-03-23\"");
    assertRows(execute("SELECT k, dateval FROM %s WHERE k = ?", 0), row(0, SimpleDateSerializer.dateStringToDays("1987-03-23")));
    assertRows(execute("SELECT k, asciival from mv_dateval WHERE dateval = from_json(?)", "\"1987-03-23\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "123123.123123");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("123123.123123")));
    assertRows(execute("SELECT k, asciival from mv_decimalval WHERE decimalval = from_json(?)", "123123.123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("123123")));
    assertRows(execute("SELECT k, asciival from mv_decimalval WHERE decimalval = from_json(?)", "123123.123123"));
    assertRows(execute("SELECT k, asciival from mv_decimalval WHERE decimalval = from_json(?)", "123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "\"123123.123123\"");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("123123.123123")));
    updateView("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "\"-1.23E-12\"");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("-1.23E-12")));
    assertRows(execute("SELECT k, asciival from mv_decimalval WHERE decimalval = from_json(?)", "\"-1.23E-12\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, doubleval) VALUES (?, from_json(?))", 0, "123123.123123");
    assertRows(execute("SELECT k, doubleval FROM %s WHERE k = ?", 0), row(0, 123123.123123d));
    assertRows(execute("SELECT k, asciival from mv_doubleval WHERE doubleval = from_json(?)", "123123.123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, doubleval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, doubleval FROM %s WHERE k = ?", 0), row(0, 123123.0d));
    assertRows(execute("SELECT k, asciival from mv_doubleval WHERE doubleval = from_json(?)", "123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, floatval) VALUES (?, from_json(?))", 0, "123123.123123");
    assertRows(execute("SELECT k, floatval FROM %s WHERE k = ?", 0), row(0, 123123.123123f));
    assertRows(execute("SELECT k, asciival from mv_floatval WHERE floatval = from_json(?)", "123123.123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, floatval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, floatval FROM %s WHERE k = ?", 0), row(0, 123123.0f));
    assertRows(execute("SELECT k, asciival from mv_floatval WHERE floatval = from_json(?)", "123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, inetval) VALUES (?, from_json(?))", 0, "\"127.0.0.1\"");
    assertRows(execute("SELECT k, inetval FROM %s WHERE k = ?", 0), row(0, InetAddress.getByName("127.0.0.1")));
    assertRows(execute("SELECT k, asciival from mv_inetval WHERE inetval = from_json(?)", "\"127.0.0.1\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, inetval) VALUES (?, from_json(?))", 0, "\"::1\"");
    assertRows(execute("SELECT k, inetval FROM %s WHERE k = ?", 0), row(0, InetAddress.getByName("::1")));
    assertRows(execute("SELECT k, asciival from mv_inetval WHERE inetval = from_json(?)", "\"127.0.0.1\""));
    assertRows(execute("SELECT k, asciival from mv_inetval WHERE inetval = from_json(?)", "\"::1\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, intval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, intval FROM %s WHERE k = ?", 0), row(0, 123123));
    assertRows(execute("SELECT k, asciival from mv_intval WHERE intval = from_json(?)", "123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "\"some \\\" text\"");
    assertRows(execute("SELECT k, textval FROM %s WHERE k = ?", 0), row(0, "some \" text"));
    updateView("INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "\"\\u2013\"");
    assertRows(execute("SELECT k, textval FROM %s WHERE k = ?", 0), row(0, "\u2013"));
    assertRows(execute("SELECT k, asciival from mv_textval WHERE textval = from_json(?)", "\"\\u2013\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "\"abcd\"");
    assertRows(execute("SELECT k, textval FROM %s WHERE k = ?", 0), row(0, "abcd"));
    assertRows(execute("SELECT k, asciival from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, timeval) VALUES (?, from_json(?))", 0, "\"07:35:07.000111222\"");
    assertRows(execute("SELECT k, timeval FROM %s WHERE k = ?", 0), row(0, TimeSerializer.timeStringToLong("07:35:07.000111222")));
    assertRows(execute("SELECT k, asciival from mv_timeval WHERE timeval = from_json(?)", "\"07:35:07.000111222\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, timestampval) VALUES (?, from_json(?))", 0, "123123123123");
    assertRows(execute("SELECT k, timestampval FROM %s WHERE k = ?", 0), row(0, new Date(123123123123L)));
    assertRows(execute("SELECT k, asciival from mv_timestampval WHERE timestampval = from_json(?)", "123123123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, timestampval) VALUES (?, from_json(?))", 0, "\"2014-01-01\"");
    assertRows(execute("SELECT k, timestampval FROM %s WHERE k = ?", 0), row(0, new SimpleDateFormat("y-M-d").parse("2014-01-01")));
    assertRows(execute("SELECT k, asciival from mv_timestampval WHERE timestampval = from_json(?)", "\"2014-01-01\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, timeuuidval) VALUES (?, from_json(?))", 0, "\"6bddc89a-5644-11e4-97fc-56847afe9799\"");
    assertRows(execute("SELECT k, timeuuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    updateView("INSERT INTO %s (k, timeuuidval) VALUES (?, from_json(?))", 0, "\"6BDDC89A-5644-11E4-97FC-56847AFE9799\"");
    assertRows(execute("SELECT k, timeuuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    assertRows(execute("SELECT k, asciival from mv_timeuuidval WHERE timeuuidval = from_json(?)", "\"6BDDC89A-5644-11E4-97FC-56847AFE9799\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, uuidval) VALUES (?, from_json(?))", 0, "\"6bddc89a-5644-11e4-97fc-56847afe9799\"");
    assertRows(execute("SELECT k, uuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    updateView("INSERT INTO %s (k, uuidval) VALUES (?, from_json(?))", 0, "\"6BDDC89A-5644-11E4-97FC-56847AFE9799\"");
    assertRows(execute("SELECT k, uuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    assertRows(execute("SELECT k, asciival from mv_uuidval WHERE uuidval = from_json(?)", "\"6BDDC89A-5644-11E4-97FC-56847AFE9799\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "123123123123");
    assertRows(execute("SELECT k, varintval FROM %s WHERE k = ?", 0), row(0, new BigInteger("123123123123")));
    assertRows(execute("SELECT k, asciival from mv_varintval WHERE varintval = from_json(?)", "123123123123"), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "\"1234567890123456789012345678901234567890\"");
    assertRows(execute("SELECT k, varintval FROM %s WHERE k = ?", 0), row(0, new BigInteger("1234567890123456789012345678901234567890")));
    assertRows(execute("SELECT k, asciival from mv_varintval WHERE varintval = from_json(?)", "\"1234567890123456789012345678901234567890\""), row(0, "ascii text"));
    updateView("INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "[1, 2, 3]");
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(1, 2, 3)));
    assertRows(execute("SELECT k, listval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(1, 2, 3)));
    updateView("INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "[1]");
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(1)));
    assertRows(execute("SELECT k, listval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(1)));
    updateView("UPDATE %s SET listval = listval + from_json(?) WHERE k = ?", "[2]", 0);
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(1, 2)));
    assertRows(execute("SELECT k, listval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(1, 2)));
    updateView("UPDATE %s SET listval = from_json(?) + listval WHERE k = ?", "[0]", 0);
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(0, 1, 2)));
    assertRows(execute("SELECT k, listval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(0, 1, 2)));
    updateView("UPDATE %s SET listval[1] = from_json(?) WHERE k = ?", "10", 0);
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(0, 10, 2)));
    assertRows(execute("SELECT k, listval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(0, 10, 2)));
    updateView("DELETE listval[1] FROM %s WHERE k = ?", 0);
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(0, 2)));
    assertRows(execute("SELECT k, listval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(0, 2)));
    updateView("INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "[]");
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, null));
    assertRows(execute("SELECT k, listval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, null));
    updateView("INSERT INTO %s (k, frozenlistval) VALUES (?, from_json(?))", 0, "[1, 2, 3]");
    assertRows(execute("SELECT k, frozenlistval FROM %s WHERE k = ?", 0), row(0, list(1, 2, 3)));
    assertRows(execute("SELECT k, frozenlistval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(1, 2, 3)));
    assertRows(execute("SELECT k, textval from mv_frozenlistval where frozenlistval = from_json(?)", "[1, 2, 3]"), row(0, "abcd"));
    updateView("INSERT INTO %s (k, frozenlistval) VALUES (?, from_json(?))", 0, "[3, 2, 1]");
    assertRows(execute("SELECT k, frozenlistval FROM %s WHERE k = ?", 0), row(0, list(3, 2, 1)));
    assertRows(execute("SELECT k, textval from mv_frozenlistval where frozenlistval = from_json(?)", "[1, 2, 3]"));
    assertRows(execute("SELECT k, textval from mv_frozenlistval where frozenlistval = from_json(?)", "[3, 2, 1]"), row(0, "abcd"));
    assertRows(execute("SELECT k, frozenlistval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list(3, 2, 1)));
    updateView("INSERT INTO %s (k, frozenlistval) VALUES (?, from_json(?))", 0, "[]");
    assertRows(execute("SELECT k, frozenlistval FROM %s WHERE k = ?", 0), row(0, list()));
    assertRows(execute("SELECT k, frozenlistval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, list()));
    updateView("INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    assertRows(execute("SELECT k, setval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    updateView("INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    assertRows(execute("SELECT k, setval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    updateView("UPDATE %s SET setval = setval + from_json(?) WHERE k = ?", "[\"6bddc89a-5644-0000-97fc-56847afe9799\"]", 0);
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-0000-97fc-56847afe9799"), UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    assertRows(execute("SELECT k, setval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, set(UUID.fromString("6bddc89a-5644-0000-97fc-56847afe9799"), UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    updateView("UPDATE %s SET setval = setval - from_json(?) WHERE k = ?", "[\"6bddc89a-5644-0000-97fc-56847afe9799\"]", 0);
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    assertRows(execute("SELECT k, setval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    updateView("INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[]");
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, null));
    assertRows(execute("SELECT k, setval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, null));
    updateView("INSERT INTO %s (k, frozensetval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, frozensetval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    assertRows(execute("SELECT k, frozensetval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    updateView("INSERT INTO %s (k, frozensetval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-0000-11e4-97fc-56847afe9799\", \"6bddc89a-5644-11e4-97fc-56847afe9798\"]");
    assertRows(execute("SELECT k, frozensetval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-0000-11e4-97fc-56847afe9799"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798")))));
    assertRows(execute("SELECT k, frozensetval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, set(UUID.fromString("6bddc89a-0000-11e4-97fc-56847afe9799"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798")))));
    updateView("INSERT INTO %s (k, mapval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": 2}");
    assertRows(execute("SELECT k, mapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "b", 2)));
    assertRows(execute("SELECT k, mapval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, map("a", 1, "b", 2)));
    updateView("UPDATE %s SET mapval[?] = ?  WHERE k = ?", "c", 3, 0);
    assertRows(execute("SELECT k, mapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "b", 2, "c", 3)));
    assertRows(execute("SELECT k, mapval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, map("a", 1, "b", 2, "c", 3)));
    updateView("UPDATE %s SET mapval[?] = ?  WHERE k = ?", "b", 10, 0);
    assertRows(execute("SELECT k, mapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "b", 10, "c", 3)));
    assertRows(execute("SELECT k, mapval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, map("a", 1, "b", 10, "c", 3)));
    updateView("DELETE mapval[?] FROM %s WHERE k = ?", "b", 0);
    assertRows(execute("SELECT k, mapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "c", 3)));
    assertRows(execute("SELECT k, mapval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, map("a", 1, "c", 3)));
    updateView("INSERT INTO %s (k, mapval) VALUES (?, from_json(?))", 0, "{}");
    assertRows(execute("SELECT k, mapval FROM %s WHERE k = ?", 0), row(0, null));
    assertRows(execute("SELECT k, mapval from mv_textval WHERE textval = from_json(?)", "\"abcd\""), row(0, null));
    updateView("INSERT INTO %s (k, frozenmapval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": 2}");
    assertRows(execute("SELECT k, frozenmapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "b", 2)));
    assertRows(execute("SELECT k, textval FROM mv_frozenmapval WHERE frozenmapval = from_json(?)", "{\"a\": 1, \"b\": 2}"), row(0, "abcd"));
    updateView("INSERT INTO %s (k, frozenmapval) VALUES (?, from_json(?))", 0, "{\"b\": 2, \"a\": 3}");
    assertRows(execute("SELECT k, frozenmapval FROM %s WHERE k = ?", 0), row(0, map("a", 3, "b", 2)));
    assertRows(execute("SELECT k, frozenmapval FROM %s WHERE k = ?", 0), row(0, map("a", 3, "b", 2)));
    updateView("INSERT INTO %s (k, tupleval) VALUES (?, from_json(?))", 0, "[1, \"foobar\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, tupleval FROM %s WHERE k = ?", 0), row(0, tuple(1, "foobar", UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))));
    assertRows(execute("SELECT k, textval FROM mv_tupleval WHERE tupleval = ?", tuple(1, "foobar", UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))), row(0, "abcd"));
    updateView("INSERT INTO %s (k, tupleval) VALUES (?, from_json(?))", 0, "[1, null, \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, tupleval FROM %s WHERE k = ?", 0), row(0, tuple(1, null, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))));
    assertRows(execute("SELECT k, textval FROM mv_tupleval WHERE tupleval = ?", tuple(1, "foobar", UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))));
    assertRows(execute("SELECT k, textval FROM mv_tupleval WHERE tupleval = ?", tuple(1, null, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))), row(0, "abcd"));
    updateView("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), set("bar", "foo")));
    assertRows(execute("SELECT k, textval FROM mv_udtval WHERE udtval = from_json(?)", "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}"), row(0, "abcd"));
    updateView("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"a\": 1, \"c\": [\"foo\", \"bar\"]}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), set("bar", "foo")));
    assertRows(execute("SELECT k, textval FROM mv_udtval WHERE udtval = from_json(?)", "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}"), row(0, "abcd"));
    updateView("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"a\": null, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, null, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), set("bar", "foo")));
    assertRows(execute("SELECT k, textval FROM mv_udtval WHERE udtval = from_json(?)", "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}"));
    assertRows(execute("SELECT k, textval FROM mv_udtval WHERE udtval = from_json(?)", "{\"a\": null, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}"), row(0, "abcd"));
    updateView("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\"}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), null));
    assertRows(execute("SELECT k, textval FROM mv_udtval WHERE udtval = from_json(?)", "{\"a\": null, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}"));
    assertRows(execute("SELECT k, textval FROM mv_udtval WHERE udtval = from_json(?)", "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\"}"), row(0, "abcd"));
}
164442.9886131elasticsearch
/**
 * Wraps the given {@code TokenStream} with the stemming filter selected by the
 * configured {@code language} name.
 *
 * Matching is case-insensitive, and most stemmers accept both a snake_case and
 * a camelCase alias (e.g. {@code light_english} / {@code lightEnglish}). If no
 * explicit mapping matches, the raw name is handed to {@code SnowballFilter},
 * which resolves it against the Snowball stemmer classes by name.
 *
 * Fix: the {@code minimalNorwegian} alias previously used {@code equals}
 * instead of {@code equalsIgnoreCase}, making it the only alias that was
 * case-sensitive; it now matches consistently with every other alias.
 */
public TokenStream create(TokenStream tokenStream) {
    if ("arabic".equalsIgnoreCase(language)) {
        return new ArabicStemFilter(tokenStream);
    } else if ("armenian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new ArmenianStemmer());
    } else if ("basque".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new BasqueStemmer());
    } else if ("bengali".equalsIgnoreCase(language)) {
        return new BengaliStemFilter(tokenStream);
    } else if ("brazilian".equalsIgnoreCase(language)) {
        return new BrazilianStemFilter(tokenStream);
    } else if ("bulgarian".equalsIgnoreCase(language)) {
        return new BulgarianStemFilter(tokenStream);
    } else if ("catalan".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new CatalanStemmer());
    } else if ("czech".equalsIgnoreCase(language)) {
        return new CzechStemFilter(tokenStream);
    } else if ("danish".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new DanishStemmer());
    } else if ("dutch".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new DutchStemmer());
    } else if ("dutch_kp".equalsIgnoreCase(language) || "dutchKp".equalsIgnoreCase(language) || "kp".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new KpStemmer());
    } else if ("english".equalsIgnoreCase(language)) {
        return new PorterStemFilter(tokenStream);
    } else if ("light_english".equalsIgnoreCase(language) || "lightEnglish".equalsIgnoreCase(language) || "kstem".equalsIgnoreCase(language)) {
        return new KStemFilter(tokenStream);
    } else if ("lovins".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new LovinsStemmer());
    } else if ("porter".equalsIgnoreCase(language)) {
        return new PorterStemFilter(tokenStream);
    } else if ("porter2".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new EnglishStemmer());
    } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) {
        return new EnglishMinimalStemFilter(tokenStream);
    } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) {
        return new EnglishPossessiveFilter(tokenStream);
    } else if ("estonian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new EstonianStemmer());
    } else if ("finnish".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new FinnishStemmer());
    // "light_finish"/"lightFinish" are legacy typo aliases for the correctly
    // spelled "light_finnish" below; both are kept for backward compatibility.
    } else if ("light_finish".equalsIgnoreCase(language) || "lightFinish".equalsIgnoreCase(language)) {
        return new FinnishLightStemFilter(tokenStream);
    } else if ("light_finnish".equalsIgnoreCase(language) || "lightFinnish".equalsIgnoreCase(language)) {
        return new FinnishLightStemFilter(tokenStream);
    } else if ("french".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new FrenchStemmer());
    } else if ("light_french".equalsIgnoreCase(language) || "lightFrench".equalsIgnoreCase(language)) {
        return new FrenchLightStemFilter(tokenStream);
    } else if ("minimal_french".equalsIgnoreCase(language) || "minimalFrench".equalsIgnoreCase(language)) {
        return new FrenchMinimalStemFilter(tokenStream);
    } else if ("galician".equalsIgnoreCase(language)) {
        return new GalicianStemFilter(tokenStream);
    } else if ("minimal_galician".equalsIgnoreCase(language)) {
        return new GalicianMinimalStemFilter(tokenStream);
    } else if ("german".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new GermanStemmer());
    } else if ("german2".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new German2Stemmer());
    } else if ("light_german".equalsIgnoreCase(language) || "lightGerman".equalsIgnoreCase(language)) {
        return new GermanLightStemFilter(tokenStream);
    } else if ("minimal_german".equalsIgnoreCase(language) || "minimalGerman".equalsIgnoreCase(language)) {
        return new GermanMinimalStemFilter(tokenStream);
    } else if ("greek".equalsIgnoreCase(language)) {
        return new GreekStemFilter(tokenStream);
    } else if ("hindi".equalsIgnoreCase(language)) {
        return new HindiStemFilter(tokenStream);
    } else if ("hungarian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new HungarianStemmer());
    } else if ("light_hungarian".equalsIgnoreCase(language) || "lightHungarian".equalsIgnoreCase(language)) {
        return new HungarianLightStemFilter(tokenStream);
    } else if ("indonesian".equalsIgnoreCase(language)) {
        return new IndonesianStemFilter(tokenStream);
    } else if ("irish".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new IrishStemmer());
    } else if ("italian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new ItalianStemmer());
    } else if ("light_italian".equalsIgnoreCase(language) || "lightItalian".equalsIgnoreCase(language)) {
        return new ItalianLightStemFilter(tokenStream);
    } else if ("latvian".equalsIgnoreCase(language)) {
        return new LatvianStemFilter(tokenStream);
    } else if ("lithuanian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new LithuanianStemmer());
    } else if ("norwegian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new NorwegianStemmer());
    } else if ("light_norwegian".equalsIgnoreCase(language) || "lightNorwegian".equalsIgnoreCase(language)) {
        return new NorwegianLightStemFilter(tokenStream);
    // FIX: was "minimalNorwegian".equals(language) — the only case-sensitive
    // alias in this method; now consistent with all other aliases.
    } else if ("minimal_norwegian".equalsIgnoreCase(language) || "minimalNorwegian".equalsIgnoreCase(language)) {
        return new NorwegianMinimalStemFilter(tokenStream);
    } else if ("light_nynorsk".equalsIgnoreCase(language) || "lightNynorsk".equalsIgnoreCase(language)) {
        return new NorwegianLightStemFilter(tokenStream, NorwegianLightStemmer.NYNORSK);
    } else if ("minimal_nynorsk".equalsIgnoreCase(language) || "minimalNynorsk".equalsIgnoreCase(language)) {
        return new NorwegianMinimalStemFilter(tokenStream, NorwegianLightStemmer.NYNORSK);
    } else if ("persian".equalsIgnoreCase(language)) {
        return new PersianStemFilter(tokenStream);
    } else if ("portuguese".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new PortugueseStemmer());
    } else if ("light_portuguese".equalsIgnoreCase(language) || "lightPortuguese".equalsIgnoreCase(language)) {
        return new PortugueseLightStemFilter(tokenStream);
    } else if ("minimal_portuguese".equalsIgnoreCase(language) || "minimalPortuguese".equalsIgnoreCase(language)) {
        return new PortugueseMinimalStemFilter(tokenStream);
    } else if ("portuguese_rslp".equalsIgnoreCase(language)) {
        return new PortugueseStemFilter(tokenStream);
    } else if ("romanian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new RomanianStemmer());
    } else if ("russian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new RussianStemmer());
    } else if ("light_russian".equalsIgnoreCase(language) || "lightRussian".equalsIgnoreCase(language)) {
        return new RussianLightStemFilter(tokenStream);
    } else if ("serbian".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new SerbianStemmer());
    } else if ("spanish".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new SpanishStemmer());
    } else if ("light_spanish".equalsIgnoreCase(language) || "lightSpanish".equalsIgnoreCase(language)) {
        return new SpanishLightStemFilter(tokenStream);
    } else if ("spanish_plural".equalsIgnoreCase(language)) {
        return new SpanishPluralStemFilter(tokenStream);
    } else if ("sorani".equalsIgnoreCase(language)) {
        return new SoraniStemFilter(tokenStream);
    } else if ("swedish".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new SwedishStemmer());
    } else if ("light_swedish".equalsIgnoreCase(language) || "lightSwedish".equalsIgnoreCase(language)) {
        return new SwedishLightStemFilter(tokenStream);
    } else if ("turkish".equalsIgnoreCase(language)) {
        return new SnowballFilter(tokenStream, new TurkishStemmer());
    }
    // No explicit mapping: let SnowballFilter resolve the name itself.
    return new SnowballFilter(tokenStream, language);
}
163945.849231elasticsearch
/**
 * Constant-folds a binary math node. Children are visited (and potentially
 * folded) first; if both operands then turn out to be constant nodes, the
 * operation is evaluated at compile time, the result is attached to the left
 * operand's {@code IRDConstant} decoration, and that left node is handed to
 * {@code scope} — the parent's setter (mirroring the {@code setLeftNode}/
 * {@code setRightNode} consumers used for the child visits), which replaces
 * this binary node with the folded constant.
 *
 * Only the operations handled below are folded; any other operation (or a
 * non-constant operand) leaves the node untouched.
 */
public void visitBinaryMath(BinaryMathNode irBinaryMathNode, Consumer<ExpressionNode> scope) {
    // Fold bottom-up: nested constant sub-expressions collapse before we
    // inspect this node's operands.
    irBinaryMathNode.getLeftNode().visit(this, irBinaryMathNode::setLeftNode);
    irBinaryMathNode.getRightNode().visit(this, irBinaryMathNode::setRightNode);
    if (irBinaryMathNode.getLeftNode() instanceof ConstantNode && irBinaryMathNode.getRightNode() instanceof ConstantNode) {
        ExpressionNode irLeftConstantNode = irBinaryMathNode.getLeftNode();
        ExpressionNode irRightConstantNode = irBinaryMathNode.getRightNode();
        Object leftConstantValue = irLeftConstantNode.getDecorationValue(IRDConstant.class);
        Object rightConstantValue = irRightConstantNode.getDecorationValue(IRDConstant.class);
        Operation operation = irBinaryMathNode.getDecorationValue(IRDOperation.class);
        // The expression's static type selects which primitive cast/arithmetic
        // is applied; an unexpected type is a compiler bug surfaced via
        // createError with the binary operands in the message.
        Class<?> type = irBinaryMathNode.getDecorationValue(IRDExpressionType.class);
        if (operation == Operation.MUL) {
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue * (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue * (long) rightConstantValue));
            } else if (type == float.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((float) leftConstantValue * (float) rightConstantValue));
            } else if (type == double.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((double) leftConstantValue * (double) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.DIV) {
            // DIV can throw ArithmeticException (integer division by zero);
            // rethrow it tagged with the script source location.
            try {
                if (type == int.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue / (int) rightConstantValue));
                } else if (type == long.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue / (long) rightConstantValue));
                } else if (type == float.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((float) leftConstantValue / (float) rightConstantValue));
                } else if (type == double.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((double) leftConstantValue / (double) rightConstantValue));
                } else {
                    throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
                }
            } catch (ArithmeticException ae) {
                throw irBinaryMathNode.getLocation().createError(ae);
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.REM) {
            // REM has the same division-by-zero hazard as DIV.
            try {
                if (type == int.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue % (int) rightConstantValue));
                } else if (type == long.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue % (long) rightConstantValue));
                } else if (type == float.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((float) leftConstantValue % (float) rightConstantValue));
                } else if (type == double.class) {
                    irLeftConstantNode.attachDecoration(new IRDConstant((double) leftConstantValue % (double) rightConstantValue));
                } else {
                    throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
                }
            } catch (ArithmeticException ae) {
                throw irBinaryMathNode.getLocation().createError(ae);
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.ADD) {
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue + (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue + (long) rightConstantValue));
            } else if (type == float.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((float) leftConstantValue + (float) rightConstantValue));
            } else if (type == double.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((double) leftConstantValue + (double) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.SUB) {
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue - (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue - (long) rightConstantValue));
            } else if (type == float.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((float) leftConstantValue - (float) rightConstantValue));
            } else if (type == double.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((double) leftConstantValue - (double) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.LSH) {
            // Shifts only apply to int/long, and the shift distance is always
            // an int — hence the (int) cast on the right operand even for long.
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue << (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue << (int) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.RSH) {
            // Arithmetic (sign-extending) right shift.
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue >> (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue >> (int) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.USH) {
            // Logical (zero-filling) right shift.
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue >>> (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue >>> (int) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.BWAND) {
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue & (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue & (long) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.XOR) {
            // XOR is the only operation here that also applies to booleans.
            if (type == boolean.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((boolean) leftConstantValue ^ (boolean) rightConstantValue));
            } else if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue ^ (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue ^ (long) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        } else if (operation == Operation.BWOR) {
            if (type == int.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((int) leftConstantValue | (int) rightConstantValue));
            } else if (type == long.class) {
                irLeftConstantNode.attachDecoration(new IRDConstant((long) leftConstantValue | (long) rightConstantValue));
            } else {
                throw irBinaryMathNode.getLocation().createError(binaryError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
            }
            scope.accept(irLeftConstantNode);
        }
        // Other operations (if any reach here) are intentionally not folded.
    }
}
165261.931410elasticsearch
/**
 * Verifies the human-readable description produced by {@code ClusterFormationState#getDescription()}
 * for a node whose last-accepted cluster state already contains a voting configuration
 * (i.e. after cluster bootstrapping), across varying configuration sizes, discovered peers,
 * bootstrap placeholders, and split committed/accepted configurations.
 */
public void testDescriptionAfterBootstrapping() {
    final DiscoveryNode localNode = makeDiscoveryNode("local");
    final ClusterState clusterState = state(localNode, "otherNode");
    // Nothing resolved, nothing discovered: description names the single required node id
    // and an empty hosts-provider list.
    assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [otherNode], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // A resolved transport address shows up in the "from hosts providers" list.
    final TransportAddress otherAddress = buildNewFakeTransportAddress();
    assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, singletonList(otherAddress), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [otherNode], " + "have only discovered non-quorum []; " + "discovery will continue using [" + otherAddress + "] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Discovering the required node itself is reported as a "possible quorum".
    final DiscoveryNode otherNode = makeDiscoveryNode("otherNode");
    assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(otherNode), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [otherNode], " + "have discovered possible quorum [" + noAttr(otherNode) + "]; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Discovering an unrelated node is still "non-quorum".
    final DiscoveryNode yetAnotherNode = makeDiscoveryNode("yetAnotherNode");
    assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(yetAnotherNode), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [otherNode], " + "have only discovered non-quorum [" + noAttr(yetAnotherNode) + "]; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // A discovered peer that claims a current master gets that claim included in the description.
    final DiscoveryNode recentMaster = makeDiscoveryNode("recentMaster");
    assertThat(new ClusterFormationState(Settings.EMPTY, clusterState, emptyList(), singletonList(yetAnotherNode), singleton(recentMaster), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [otherNode], " + "have only discovered non-quorum [" + noAttr(yetAnotherNode) + "] who claim current master to be [" + noAttr(recentMaster) + "]; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Two-node configuration: "requires two nodes with ids [...]".
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2"), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Three-node configuration: majority phrasing "at least 2 nodes with ids from [...]".
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3"), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires at least 2 nodes with ids from [n1, n2, n3], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Bootstrap placeholder ids are excluded from the listed ids; the 2-of-3 requirement collapses
    // to exactly the two real nodes.
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", BOOTSTRAP_PLACEHOLDER_PREFIX + "n3"), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Four- and five-node configurations both need at least 3 votes.
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4"), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4", "n5"), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4, n5], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // One placeholder in a 5-node config: still "at least 3" but only the four real ids listed.
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Two placeholders: the quorum requirement becomes exactly the three real nodes.
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", BOOTSTRAP_PLACEHOLDER_PREFIX + "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Identical accepted and committed configurations are merged into a single requirement...
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, new String[] { "n1" }, new String[] { "n1" }), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [n1], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // ...while differing ones are described as a conjunction of both requirements.
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, new String[] { "n1" }, new String[] { "n2" }), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [n1] and a node with id [n2], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, new String[] { "n1" }, new String[] { "n2", "n3" }), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [n1] and two nodes with ids [n2, n3], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, new String[] { "n1" }, new String[] { "n2", "n3", "n4" }), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires a node with id [n1] and " + "at least 2 nodes with ids from [n2, n3, n4], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
    // Only master-eligible nodes from the last-known cluster state are advertised as discovery
    // targets (the non-master node is absent); iteration order of the two masters is not fixed,
    // hence oneOf(...) over both orderings.
    final DiscoveryNode otherMasterNode = makeDiscoveryNode("other-master");
    final DiscoveryNode otherNonMasterNode = DiscoveryNodeUtils.builder("other-non-master").roles(new HashSet<>(randomSubsetOf(DiscoveryNodeRole.roles()).stream().filter(r -> r != DiscoveryNodeRole.MASTER_ROLE).toList())).build();
    String[] configNodeIds = new String[] { "n1", "n2" };
    final ClusterState stateWithOtherNodes = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).add(otherMasterNode).add(otherNonMasterNode)).metadata(Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().lastAcceptedConfiguration(config(configNodeIds)).lastCommittedConfiguration(config(configNodeIds)).build())).build();
    assertThat(new ClusterFormationState(Settings.EMPTY, stateWithOtherNodes, emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is(oneOf("master not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + ", " + noAttr(otherMasterNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0", "master not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(otherMasterNode) + ", " + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")));
    // A stale-state placeholder config means this node was not master-eligible the last time it
    // joined, which gets its own dedicated explanation.
    assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, GatewayMetaState.STALE_STATE_CONFIG_NODE_ID), emptyList(), emptyList(), emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), emptyList()).getDescription(), is("master not discovered or elected yet, an election requires one or more nodes that have already participated as " + "master-eligible nodes in the cluster but this node was not master-eligible the last time it joined the cluster, " + "have only discovered non-quorum []; " + "discovery will continue using [] from hosts providers and [" + noAttr(localNode) + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0"));
}
166620.645381elasticsearch
/**
 * Constructs and wires every long-lived component of the machine learning plugin and returns
 * the collection to be registered with the node. Construction order matters: later components
 * take earlier ones as constructor arguments, and several are also stashed into plugin-level
 * {@code SetOnce}-style holders (e.g. {@code this.modelLoadingService}) for use elsewhere.
 * When ML is disabled, returns only empty holder objects.
 */
public Collection<?> createComponents(PluginServices services) {
    Client client = services.client();
    ClusterService clusterService = services.clusterService();
    ThreadPool threadPool = services.threadPool();
    Environment environment = services.environment();
    NamedXContentRegistry xContentRegistry = services.xContentRegistry();
    IndexNameExpressionResolver indexNameExpressionResolver = services.indexNameExpressionResolver();
    TelemetryProvider telemetryProvider = services.telemetryProvider();
    // ML disabled: return empty holders so consumers that expect these types still find them.
    if (enabled == false) {
        return List.of(new JobManagerHolder(), new MachineLearningExtensionHolder());
    }
    // Warn (and emit a deprecation) about the upcoming removal of native ML on macOS x86_64.
    if ("darwin-x86_64".equals(Platforms.PLATFORM_NAME)) {
        String msg = "The machine learning plugin will be permanently disabled on macOS x86_64 in new minor versions released " + "from December 2024 onwards. To continue to use machine learning functionality on macOS please switch to an arm64 " + "machine (Apple silicon). Alternatively, it will still be possible to run Elasticsearch with machine learning " + "enabled in a Docker container on macOS x86_64.";
        logger.warn(msg);
        deprecationLogger.warn(DeprecationCategory.PLUGINS, "ml-darwin-x86_64", msg);
    }
    machineLearningExtension.get().configure(environment.settings());
    this.mlUpgradeModeActionFilter.set(new MlUpgradeModeActionFilter(clusterService));
    // Index templates for ML system indices.
    MlIndexTemplateRegistry registry = new MlIndexTemplateRegistry(settings, clusterService, threadPool, client, machineLearningExtension.get().useIlm(), xContentRegistry);
    registry.initialize();
    // Auditors write notification documents for the various ML subsystems.
    AnomalyDetectionAuditor anomalyDetectionAuditor = new AnomalyDetectionAuditor(client, clusterService, machineLearningExtension.get().includeNodeInfo());
    DataFrameAnalyticsAuditor dataFrameAnalyticsAuditor = new DataFrameAnalyticsAuditor(client, clusterService, machineLearningExtension.get().includeNodeInfo());
    InferenceAuditor inferenceAuditor = new InferenceAuditor(client, clusterService, machineLearningExtension.get().includeNodeInfo());
    SystemAuditor systemAuditor = new SystemAuditor(client, clusterService);
    this.dataFrameAnalyticsAuditor.set(dataFrameAnalyticsAuditor);
    // Persistence layer: results, annotations, job counts, and config providers.
    OriginSettingClient originSettingClient = new OriginSettingClient(client, ML_ORIGIN);
    ResultsPersisterService resultsPersisterService = new ResultsPersisterService(threadPool, originSettingClient, clusterService, settings);
    AnnotationPersister anomalyDetectionAnnotationPersister = new AnnotationPersister(resultsPersisterService);
    JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings, indexNameExpressionResolver);
    JobResultsPersister jobResultsPersister = new JobResultsPersister(originSettingClient, resultsPersisterService);
    JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(client, resultsPersisterService, anomalyDetectionAuditor);
    JobConfigProvider jobConfigProvider = new JobConfigProvider(client, xContentRegistry);
    DatafeedConfigProvider datafeedConfigProvider = new DatafeedConfigProvider(client, xContentRegistry, clusterService);
    this.datafeedConfigProvider.set(datafeedConfigProvider);
    // Job and datafeed management.
    UpdateJobProcessNotifier notifier = new UpdateJobProcessNotifier(client, clusterService, threadPool);
    JobManager jobManager = new JobManager(jobResultsProvider, jobResultsPersister, clusterService, anomalyDetectionAuditor, threadPool, client, notifier, xContentRegistry, indexNameExpressionResolver, () -> NativeMemoryCalculator.getMaxModelMemoryLimit(clusterService));
    DatafeedManager datafeedManager = new DatafeedManager(datafeedConfigProvider, jobConfigProvider, xContentRegistry, settings, client);
    JobManagerHolder jobManagerHolder = new JobManagerHolder(jobManager);
    NativeStorageProvider nativeStorageProvider = new NativeStorageProvider(environment, MIN_DISK_SPACE_OFF_HEAP.get(settings));
    final MlController mlController;
    final AutodetectProcessFactory autodetectProcessFactory;
    final NormalizerProcessFactory normalizerProcessFactory;
    final AnalyticsProcessFactory<AnalyticsResult> analyticsProcessFactory;
    final AnalyticsProcessFactory<MemoryUsageEstimationResult> memoryEstimationProcessFactory;
    final PyTorchProcessFactory pyTorchProcessFactory;
    // Either spawn the real native ML processes, or install no-op/in-JVM stand-ins (used when
    // the native autodetect process is disabled, e.g. in tests).
    if (MachineLearningField.AUTODETECT_PROCESS.get(settings)) {
        try {
            NativeController nativeController = NativeController.makeNativeController(clusterService.getNodeName(), environment, xContentRegistry);
            autodetectProcessFactory = new NativeAutodetectProcessFactory(environment, settings, nativeController, clusterService, resultsPersisterService, anomalyDetectionAuditor);
            normalizerProcessFactory = new NativeNormalizerProcessFactory(environment, nativeController, clusterService);
            analyticsProcessFactory = new NativeAnalyticsProcessFactory(environment, nativeController, clusterService, xContentRegistry, resultsPersisterService, dataFrameAnalyticsAuditor);
            memoryEstimationProcessFactory = new NativeMemoryUsageEstimationProcessFactory(environment, nativeController, clusterService);
            pyTorchProcessFactory = new NativePyTorchProcessFactory(environment, nativeController, clusterService);
            mlController = nativeController;
        } catch (IOException e) {
            // NOTE(review): the IOException is only logged at trace and not attached as the cause
            // of the thrown exception — presumably deliberate to keep the user-facing message
            // clean, but worth confirming.
            logger.trace("Failed to connect to ML native controller", e);
            throw new ElasticsearchException("Failure running machine learning native code. This could be due to running " + "on an unsupported OS or distribution, missing OS libraries, or a problem with the temp directory. To " + "bypass this problem by running Elasticsearch without machine learning functionality set [" + XPackSettings.MACHINE_LEARNING_ENABLED.getKey() + ": false].");
        }
    } else {
        mlController = new DummyController();
        autodetectProcessFactory = (pipelineId, job, autodetectParams, executorService, onProcessCrash) -> new BlackHoleAutodetectProcess(pipelineId, onProcessCrash);
        // factor of 1.0 makes renormalization a no-op
        normalizerProcessFactory = (jobId, quantilesState, bucketSpan, executorService) -> new MultiplyingNormalizerProcess(1.0);
        analyticsProcessFactory = (jobId, analyticsProcessConfig, hasState, executorService, onProcessCrash) -> null;
        memoryEstimationProcessFactory = (jobId, analyticsProcessConfig, hasState, executorService, onProcessCrash) -> null;
        pyTorchProcessFactory = (task, executorService, afterInputStreamClose, onProcessCrash) -> new BlackHolePyTorchProcess();
    }
    // Anomaly detection runtime: process manager plus datafeed execution.
    NormalizerFactory normalizerFactory = new NormalizerFactory(normalizerProcessFactory, threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME));
    AutodetectProcessManager autodetectProcessManager = new AutodetectProcessManager(settings, client, threadPool, xContentRegistry, anomalyDetectionAuditor, clusterService, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, anomalyDetectionAnnotationPersister, autodetectProcessFactory, normalizerFactory, nativeStorageProvider, indexNameExpressionResolver);
    this.autodetectProcessManager.set(autodetectProcessManager);
    DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder(client, xContentRegistry, anomalyDetectionAuditor, anomalyDetectionAnnotationPersister, System::currentTimeMillis, jobResultsPersister, settings, clusterService);
    DatafeedContextProvider datafeedContextProvider = new DatafeedContextProvider(jobConfigProvider, datafeedConfigProvider, jobResultsProvider);
    DatafeedRunner datafeedRunner = new DatafeedRunner(threadPool, client, clusterService, datafeedJobBuilder, System::currentTimeMillis, anomalyDetectionAuditor, autodetectProcessManager, datafeedContextProvider);
    this.datafeedRunner.set(datafeedRunner);
    // Trained models: stats, cache metadata, storage, loading, and deployment.
    final TrainedModelStatsService trainedModelStatsService = new TrainedModelStatsService(resultsPersisterService, originSettingClient, indexNameExpressionResolver, clusterService, threadPool);
    final TrainedModelCacheMetadataService trainedModelCacheMetadataService = new TrainedModelCacheMetadataService(clusterService, client);
    final TrainedModelProvider trainedModelProvider = new TrainedModelProvider(client, trainedModelCacheMetadataService, xContentRegistry);
    final ModelLoadingService modelLoadingService = new ModelLoadingService(trainedModelProvider, inferenceAuditor, threadPool, clusterService, trainedModelStatsService, settings, clusterService.getNodeName(), inferenceModelBreaker.get(), getLicenseState());
    this.modelLoadingService.set(modelLoadingService);
    this.learningToRankService.set(new LearningToRankService(modelLoadingService, trainedModelProvider, services.scriptService(), services.xContentRegistry()));
    this.deploymentManager.set(new DeploymentManager(client, xContentRegistry, threadPool, pyTorchProcessFactory, getMaxModelDeploymentsPerNode(), inferenceAuditor));
    // Data frame analytics runtime.
    AnalyticsProcessManager analyticsProcessManager = new AnalyticsProcessManager(settings, client, threadPool, analyticsProcessFactory, dataFrameAnalyticsAuditor, trainedModelProvider, resultsPersisterService, EsExecutors.allocatedProcessors(settings));
    MemoryUsageEstimationProcessManager memoryEstimationProcessManager = new MemoryUsageEstimationProcessManager(threadPool.generic(), threadPool.executor(UTILITY_THREAD_POOL_NAME), memoryEstimationProcessFactory);
    DataFrameAnalyticsConfigProvider dataFrameAnalyticsConfigProvider = new DataFrameAnalyticsConfigProvider(client, xContentRegistry, dataFrameAnalyticsAuditor, clusterService);
    // DataFrameAnalyticsManager needs a NodeClient specifically; the injected client is one.
    assert client instanceof NodeClient;
    DataFrameAnalyticsManager dataFrameAnalyticsManager = new DataFrameAnalyticsManager(settings, (NodeClient) client, threadPool, clusterService, dataFrameAnalyticsConfigProvider, analyticsProcessManager, dataFrameAnalyticsAuditor, indexNameExpressionResolver, resultsPersisterService, modelLoadingService, machineLearningExtension.get().getAnalyticsDestIndexAllowedSettings());
    this.dataFrameAnalyticsManager.set(dataFrameAnalyticsManager);
    // Memory tracking, node lifecycle, and cluster-state listeners.
    MlMemoryTracker memoryTracker = new MlMemoryTracker(settings, clusterService, threadPool, jobManager, jobResultsProvider, dataFrameAnalyticsConfigProvider);
    this.memoryTracker.set(memoryTracker);
    MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(clusterService, datafeedRunner, mlController, autodetectProcessManager, dataFrameAnalyticsManager, memoryTracker);
    this.mlLifeCycleService.set(mlLifeCycleService);
    MlAssignmentNotifier mlAssignmentNotifier = new MlAssignmentNotifier(anomalyDetectionAuditor, dataFrameAnalyticsAuditor, threadPool, clusterService);
    MlAutoUpdateService mlAutoUpdateService = new MlAutoUpdateService(threadPool, List.of(new DatafeedConfigAutoUpdater(datafeedConfigProvider, indexNameExpressionResolver)));
    clusterService.addListener(mlAutoUpdateService);
    // Stop ML tasks if the license becomes invalid.
    final InvalidLicenseEnforcer enforcer = new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedRunner, autodetectProcessManager);
    enforcer.listenForLicenseStateChanges();
    nativeStorageProvider.cleanupLocalTmpStorageInCaseOfUncleanShutdown();
    // Availability-zone awareness, model assignment, and autoscaling.
    AbstractNodeAvailabilityZoneMapper nodeAvailabilityZoneMapper = machineLearningExtension.get().getNodeAvailabilityZoneMapper(settings, clusterService.getClusterSettings());
    clusterService.addListener(nodeAvailabilityZoneMapper);
    final TrainedModelAssignmentService trainedModelAssignmentService = new TrainedModelAssignmentService(client, clusterService, threadPool);
    trainedModelAllocationClusterServiceSetOnce.set(new TrainedModelAssignmentClusterService(settings, clusterService, threadPool, new NodeLoadDetector(memoryTracker), systemAuditor, nodeAvailabilityZoneMapper, client));
    mlAutoscalingDeciderService.set(new MlAutoscalingDeciderService(memoryTracker, settings, nodeAvailabilityZoneMapper, clusterService));
    MlInitializationService mlInitializationService = new MlInitializationService(settings, threadPool, clusterService, client, mlAssignmentNotifier, machineLearningExtension.get().isAnomalyDetectionEnabled(), machineLearningExtension.get().isDataFrameAnalyticsEnabled(), machineLearningExtension.get().isNlpEnabled());
    MlMetrics mlMetrics = new MlMetrics(telemetryProvider.getMeterRegistry(), clusterService, settings, autodetectProcessManager, dataFrameAnalyticsManager);
    // Everything returned here is registered as a node component.
    return List.of(mlLifeCycleService, new MlControllerHolder(mlController), jobResultsProvider, jobResultsPersister, jobConfigProvider, datafeedConfigProvider, jobManager, jobManagerHolder, autodetectProcessManager, mlInitializationService, jobDataCountsPersister, datafeedRunner, datafeedManager, anomalyDetectionAuditor, dataFrameAnalyticsAuditor, inferenceAuditor, systemAuditor, mlAssignmentNotifier, mlAutoUpdateService, memoryTracker, analyticsProcessManager, memoryEstimationProcessManager, dataFrameAnalyticsConfigProvider, nativeStorageProvider, modelLoadingService, trainedModelCacheMetadataService, trainedModelProvider, trainedModelAssignmentService, trainedModelAllocationClusterServiceSetOnce.get(), deploymentManager.get(), nodeAvailabilityZoneMapper, new MachineLearningExtensionHolder(machineLearningExtension.get()), mlMetrics);
}
169214.4115287elasticsearch
public void testCreateAndRestorePartialSearchableSnapshot() throws Exception {
    final String fsRepoName = randomAlphaOfLength(10);
    final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final String aliasName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final String restoredIndexName = randomBoolean() ? indexName : randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createRepository(fsRepoName, "fs", Settings.builder().put("location", randomRepoPath()).put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
    final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true);
    if (randomBoolean()) {
        originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false");
    }
    assertAcked(prepareCreate(indexName, originalIndexSettings));
    assertAcked(indicesAdmin().prepareAliases().addAlias(indexName, aliasName));
    populateIndex(indexName, 10_000);
    final TotalHits originalAllHits;
    var originalResponse = internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).get();
    try {
        originalAllHits = originalResponse.getHits().getTotalHits();
    } finally {
        originalResponse.decRef();
    }
    final TotalHits originalBarHits;
    var barResponse = internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")).get();
    try {
        originalBarHits = barResponse.getHits().getTotalHits();
    } finally {
        barResponse.decRef();
    }
    logger.info("--> [{}] in total, of which [{}] match the query", originalAllHits, originalBarHits);
    expectThrows(ResourceNotFoundException.class, "Searchable snapshot stats on a non snapshot searchable index should fail", () -> client().execute(SearchableSnapshotsStatsAction.INSTANCE, new SearchableSnapshotsStatsRequest()).actionGet());
    final SnapshotInfo snapshotInfo = createFullSnapshot(fsRepoName, snapshotName);
    ensureGreen(indexName);
    assertShardFolders(indexName, false);
    assertThat(clusterAdmin().prepareState().clear().setMetadata(true).setIndices(indexName).get().getState().metadata().index(indexName).getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN));
    final boolean deletedBeforeMount = randomBoolean();
    if (deletedBeforeMount) {
        assertAcked(indicesAdmin().prepareDelete(indexName));
    } else {
        assertAcked(indicesAdmin().prepareClose(indexName));
    }
    logger.info("--> restoring partial index [{}] with cache enabled", restoredIndexName);
    Settings.Builder indexSettingsBuilder = Settings.builder().put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true);
    final List<String> nonCachedExtensions;
    if (randomBoolean()) {
        nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim"));
        indexSettingsBuilder.putList(SearchableSnapshots.SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING.getKey(), nonCachedExtensions);
    } else {
        nonCachedExtensions = Collections.emptyList();
    }
    if (randomBoolean()) {
        indexSettingsBuilder.put(SearchableSnapshots.SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(randomLongBetween(10, 100_000)));
    }
    final int expectedReplicas;
    if (randomBoolean()) {
        expectedReplicas = numberOfReplicas();
        indexSettingsBuilder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, expectedReplicas);
    } else {
        expectedReplicas = 0;
    }
    final String indexCheckOnStartup;
    if (randomBoolean()) {
        indexCheckOnStartup = randomFrom("false", "true", "checksum");
        indexSettingsBuilder.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), indexCheckOnStartup);
    } else {
        indexCheckOnStartup = "false";
    }
    final String expectedDataTiersPreference;
    expectedDataTiersPreference = MountSearchableSnapshotRequest.Storage.SHARED_CACHE.defaultDataTiersPreference();
    indexSettingsBuilder.put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.ZERO);
    final AtomicBoolean statsWatcherRunning = new AtomicBoolean(true);
    final Thread statsWatcher = new Thread(() -> {
        while (statsWatcherRunning.get()) {
            final IndicesStatsResponse indicesStatsResponse;
            try {
                indicesStatsResponse = indicesAdmin().prepareStats(restoredIndexName).clear().setStore(true).get();
            } catch (IndexNotFoundException | IndexClosedException e) {
                continue;
            }
            for (ShardStats shardStats : indicesStatsResponse.getShards()) {
                StoreStats store = shardStats.getStats().getStore();
                assertThat(shardStats.getShardRouting().toString(), store.reservedSizeInBytes(), equalTo(0L));
                assertThat(shardStats.getShardRouting().toString(), store.sizeInBytes(), equalTo(0L));
            }
            if (indicesStatsResponse.getShards().length > 0) {
                assertThat(indicesStatsResponse.getTotal().getStore().reservedSizeInBytes(), equalTo(0L));
                assertThat(indicesStatsResponse.getTotal().getStore().sizeInBytes(), equalTo(0L));
            }
        }
    }, "test-stats-watcher");
    statsWatcher.start();
    final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(restoredIndexName, fsRepoName, snapshotInfo.snapshotId().getName(), indexName, indexSettingsBuilder.build(), Strings.EMPTY_ARRAY, true, MountSearchableSnapshotRequest.Storage.SHARED_CACHE);
    final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
    final Map<Integer, SnapshotIndexShardStatus> snapshotShards = clusterAdmin().prepareSnapshotStatus(fsRepoName).setSnapshots(snapshotInfo.snapshotId().getName()).get().getSnapshots().get(0).getIndices().get(indexName).getShards();
    ensureGreen(restoredIndexName);
    final IndicesStatsResponse indicesStatsResponse = indicesAdmin().prepareStats(restoredIndexName).clear().setStore(true).get();
    assertThat(indicesStatsResponse.getShards().length, greaterThan(0));
    long totalExpectedSize = 0;
    for (ShardStats shardStats : indicesStatsResponse.getShards()) {
        StoreStats store = shardStats.getStats().getStore();
        final ShardRouting shardRouting = shardStats.getShardRouting();
        assertThat(shardRouting.toString(), store.reservedSizeInBytes(), equalTo(0L));
        assertThat(shardRouting.toString(), store.sizeInBytes(), equalTo(0L));
        final long originalSize = snapshotShards.get(shardRouting.getId()).getStats().getTotalSize();
        totalExpectedSize += originalSize;
        final Directory unwrappedDir = FilterDirectory.unwrap(internalCluster().getInstance(IndicesService.class, getDiscoveryNodes().resolveNode(shardRouting.currentNodeId()).getName()).indexServiceSafe(shardRouting.index()).getShard(shardRouting.getId()).store().directory());
        assertThat(shardRouting.toString(), unwrappedDir, notNullValue());
        assertThat(shardRouting.toString(), unwrappedDir, instanceOf(ByteBuffersDirectory.class));
        final ByteBuffersDirectory inMemoryDir = (ByteBuffersDirectory) unwrappedDir;
        assertThat(inMemoryDir.listAll(), arrayWithSize(1));
        assertThat(shardRouting.toString(), store.totalDataSetSizeInBytes(), equalTo(originalSize));
    }
    final StoreStats store = indicesStatsResponse.getTotal().getStore();
    assertThat(store.totalDataSetSizeInBytes(), equalTo(totalExpectedSize));
    statsWatcherRunning.set(false);
    statsWatcher.join();
    final Settings settings = indicesAdmin().prepareGetSettings(restoredIndexName).get().getIndexToSettings().get(restoredIndexName);
    assertThat(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.get(settings), equalTo(snapshotName));
    assertThat(IndexModule.INDEX_STORE_TYPE_SETTING.get(settings), equalTo(SEARCHABLE_SNAPSHOT_STORE_TYPE));
    assertThat(IndexModule.INDEX_RECOVERY_TYPE_SETTING.get(settings), equalTo(SNAPSHOT_RECOVERY_STATE_FACTORY_KEY));
    assertTrue(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(settings));
    assertTrue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.exists(settings));
    assertTrue(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.exists(settings));
    assertThat(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(settings).toString(), equalTo("false"));
    assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings), equalTo(expectedReplicas));
    assertThat(DataTier.TIER_PREFERENCE_SETTING.get(settings), equalTo(expectedDataTiersPreference));
    assertTrue(SearchableSnapshotsSettings.SNAPSHOT_PARTIAL_SETTING.get(settings));
    assertTrue(DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS.get(settings));
    assertThat(IndexSettings.INDEX_CHECK_ON_STARTUP.get(settings), equalTo(indexCheckOnStartup));
    checkSoftDeletesNotEagerlyLoaded(restoredIndexName);
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, false);
    ensureGreen(restoredIndexName);
    assertBusy(() -> assertShardFolders(restoredIndexName, true), 30, TimeUnit.SECONDS);
    assertThat(clusterAdmin().prepareState().clear().setMetadata(true).setIndices(restoredIndexName).get().getState().metadata().index(restoredIndexName).getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN));
    if (deletedBeforeMount) {
        assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(0));
        assertAcked(indicesAdmin().prepareAliases().addAlias(restoredIndexName, aliasName));
    } else if (indexName.equals(restoredIndexName) == false) {
        assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(1));
        assertAcked(indicesAdmin().prepareAliases().addAliasAction(IndicesAliasesRequest.AliasActions.remove().index(indexName).alias(aliasName).mustExist(true)).addAlias(restoredIndexName, aliasName));
    }
    assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(1));
    assertTotalHits(aliasName, originalAllHits, originalBarHits);
    final Decision diskDeciderDecision = clusterAdmin().prepareAllocationExplain().setIndex(restoredIndexName).setShard(0).setPrimary(true).setIncludeYesDecisions(true).get().getExplanation().getShardAllocationDecision().getMoveDecision().getCanRemainDecision().getDecisions().stream().filter(d -> d.label().equals(DiskThresholdDecider.NAME)).findFirst().orElseThrow();
    assertThat(diskDeciderDecision.type(), equalTo(Decision.Type.YES));
    assertThat(diskDeciderDecision.getExplanation(), oneOf("disk watermarks are ignored on this index", "there is only a single data node present"));
    internalCluster().fullRestart();
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, false);
    assertTotalHits(aliasName, originalAllHits, originalBarHits);
    internalCluster().ensureAtLeastNumDataNodes(2);
    final DiscoveryNode dataNode = randomFrom(clusterAdmin().prepareState().get().getState().nodes().getDataNodes().values());
    updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNode.getName()), restoredIndexName);
    assertFalse(clusterAdmin().prepareHealth(restoredIndexName).setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).get().isTimedOut());
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, false);
    updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).putNull(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()), restoredIndexName);
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, false);
    final String clonedIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    assertAcked(indicesAdmin().prepareResizeIndex(restoredIndexName, clonedIndexName).setResizeType(ResizeType.CLONE).setSettings(Settings.builder().putNull(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()).putNull(IndexModule.INDEX_RECOVERY_TYPE_SETTING.getKey()).put(DataTier.TIER_PREFERENCE, DataTier.DATA_HOT).build()));
    ensureGreen(clonedIndexName);
    assertTotalHits(clonedIndexName, originalAllHits, originalBarHits);
    final Settings clonedIndexSettings = indicesAdmin().prepareGetSettings(clonedIndexName).get().getIndexToSettings().get(clonedIndexName);
    assertFalse(clonedIndexSettings.hasValue(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(IndexModule.INDEX_RECOVERY_TYPE_SETTING.getKey()));
    assertAcked(indicesAdmin().prepareDelete(restoredIndexName));
    assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(0));
    assertAcked(indicesAdmin().prepareAliases().addAlias(clonedIndexName, aliasName));
    assertTotalHits(aliasName, originalAllHits, originalBarHits);
}
166030.137245hadoop
/**
 * Receives a block from the upstream peer and, when the pipeline contains
 * further targets, forwards (mirrors) the data to the next datanode.
 *
 * <p>The sender is a client when {@code clientname} is non-empty, otherwise a
 * datanode (e.g. replication transfer). For TRANSFER_* stages no downstream
 * targets are permitted and a close-ack is written once the block is received.
 *
 * @param block block being written; its byte count may be adjusted to an
 *              estimate when unknown (0)
 * @param storageType storage type of this datanode's replica
 * @param blockToken access token checked before any data is accepted
 * @param clientname empty string when the writer is another datanode
 * @param targets remaining downstream datanodes in the pipeline
 * @param targetStorageTypes storage types for {@code targets} (parallel array)
 * @param srcDataNode source datanode for replication writes
 * @param stage pipeline construction stage
 * @param pipelineSize total number of nodes in the pipeline
 * @param minBytesRcvd minimum bytes already received (recovery)
 * @param maxBytesRcvd maximum bytes already received (recovery)
 * @param latestGenerationStamp generation stamp to apply on close-recovery
 * @param requestedChecksum checksum algorithm requested by the writer
 * @param cachingStrategy caching hints for the receiver
 * @param allowLazyPersist whether lazy-persist (transient) storage may be used;
 *                         further restricted below for non-local peers
 * @param pinning whether this replica should be pinned on this datanode
 * @param targetPinnings pinning flags for downstream targets (may be null)
 * @param storageId storage ID for this datanode's replica (may be empty)
 * @param targetStorageIds storage IDs for {@code targets} (parallel array)
 * @throws IOException on access failure, protocol violation, or receive error
 */
public void writeBlock(final ExtendedBlock block, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String clientname, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, final DatanodeInfo srcDataNode, final BlockConstructionStage stage, final int pipelineSize, final long minBytesRcvd, final long maxBytesRcvd, final long latestGenerationStamp, DataChecksum requestedChecksum, CachingStrategy cachingStrategy, boolean allowLazyPersist, final boolean pinning, final boolean[] targetPinnings, final String storageId, final String[] targetStorageIds) throws IOException {
    previousOpClientName = clientname;
    updateCurrentThreadName("Receiving block " + block);
    // An empty client name means the upstream peer is another datanode.
    final boolean isDatanode = clientname.length() == 0;
    final boolean isClient = !isDatanode;
    final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW || stage == BlockConstructionStage.TRANSFER_FINALIZED;
    // Lazy persist is only honored for local peers unless explicitly allowed.
    allowLazyPersist = allowLazyPersist && (dnConf.getAllowNonLocalLazyPersist() || peer.isLocal());
    long size = 0;
    // Reply stream back to the upstream node in the pipeline.
    final DataOutputStream replyOut = getBufferedOutputStream();
    // Build the storage-type list for access checking: this node's type first,
    // followed by the downstream targets' types.
    int nst = targetStorageTypes.length;
    StorageType[] storageTypes = new StorageType[nst + 1];
    storageTypes[0] = storageType;
    if (targetStorageTypes.length > 0) {
        System.arraycopy(targetStorageTypes, 0, storageTypes, 1, nst);
    }
    // Same construction for storage IDs; empty means IDs are not in use
    // (e.g. an older client), in which case no ID list is checked.
    final int nsi = targetStorageIds.length;
    final String[] storageIds;
    if (nsi > 0) {
        storageIds = new String[nsi + 1];
        storageIds[0] = storageId;
        // BUGFIX: the guard previously tested targetStorageTypes.length, which
        // could skip this copy and leave null entries in storageIds whenever
        // the types array was empty but storage IDs were supplied. nsi > 0 is
        // already established by the enclosing branch, so copy unconditionally.
        System.arraycopy(targetStorageIds, 0, storageIds, 1, nsi);
    } else {
        storageIds = new String[0];
    }
    checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE, storageTypes, storageIds);
    // A transfer is a single-hop copy; downstream targets are a protocol error.
    if (isTransfer && targets.length > 0) {
        throw new IOException(stage + " does not support multiple targets " + Arrays.asList(targets));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("opWriteBlock: stage={}, clientname={}\n  " + "block  ={}, newGs={}, bytesRcvd=[{}, {}]\n  " + "targets={}; pipelineSize={}, srcDataNode={}, pinning={}", stage, clientname, block, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, Arrays.asList(targets), pipelineSize, srcDataNode, pinning);
        LOG.debug("isDatanode={}, isClient={}, isTransfer={}", isDatanode, isClient, isTransfer);
        LOG.debug("writeBlock receive buf size {} tcp no delay {}", peer.getReceiveBufferSize(), peer.getTcpNoDelay());
    }
    // Keep an unmodified copy for forwarding downstream; the local copy's size
    // may be replaced by an estimate below.
    final ExtendedBlock originalBlock = new ExtendedBlock(block);
    if (block.getNumBytes() == 0) {
        block.setNumBytes(dataXceiverServer.estimateBlockSize);
    }
    LOG.info("Receiving {} src: {} dest: {}", block, remoteAddress, localAddress);
    // Streams/socket to the next datanode in the pipeline, if any.
    DataOutputStream mirrorOut = null;
    DataInputStream mirrorIn = null;
    Socket mirrorSock = null;
    String mirrorNode = null;
    String firstBadLink = "";
    Status mirrorInStatus = SUCCESS;
    final String storageUuid;
    final boolean isOnTransientStorage;
    try {
        final Replica replica;
        if (isDatanode || stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            // Normal path: open a block receiver for the incoming data.
            setCurrentBlockReceiver(getBlockReceiver(block, storageType, in, peer.getRemoteAddressString(), peer.getLocalAddressString(), stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd, clientname, srcDataNode, datanode, requestedChecksum, cachingStrategy, allowLazyPersist, pinning, storageId));
            replica = blockReceiver.getReplica();
        } else {
            // Client-driven close recovery: no data follows, just recover the
            // replica with the new generation stamp.
            replica = datanode.data.recoverClose(block, latestGenerationStamp, minBytesRcvd);
        }
        storageUuid = replica.getStorageUuid();
        isOnTransientStorage = replica.isOnTransientStorage();
        // Establish the downstream connection before accepting data so a bad
        // link can be reported to the client immediately.
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
            LOG.debug("Connecting to datanode {}", mirrorNode);
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                DataNodeFaultInjector.get().failMirrorConnection();
                // Timeouts grow with remaining pipeline length so downstream
                // failures have time to propagate back.
                int timeoutValue = dnConf.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
                int writeTimeout = dnConf.socketWriteTimeout + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setKeepAlive(true);
                if (dnConf.getTransferSocketSendBufferSize() > 0) {
                    mirrorSock.setSendBufferSize(dnConf.getTransferSocketSendBufferSize());
                }
                OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock, writeTimeout);
                InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
                DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
                SecretKey secretKey = null;
                if (dnConf.overwriteDownstreamDerivedQOP) {
                    String bpid = block.getBlockPoolId();
                    BlockKey blockKey = datanode.blockPoolTokenSecretManager.get(bpid).getCurrentKey();
                    secretKey = blockKey.getKey();
                }
                // Wrap the raw streams with SASL negotiation before buffering.
                IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock, unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0], secretKey);
                unbufMirrorOut = saslStreams.out;
                unbufMirrorIn = saslStreams.in;
                mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut, smallBufferSize));
                mirrorIn = new DataInputStream(unbufMirrorIn);
                String targetStorageId = null;
                if (targetStorageIds.length > 0) {
                    targetStorageId = targetStorageIds[0];
                }
                // Forward the write downstream; the next node's pinning flag is
                // targetPinnings[0] when pinning info was provided.
                if (targetPinnings != null && targetPinnings.length > 0) {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, targetPinnings[0], targetPinnings, targetStorageId, targetStorageIds);
                } else {
                    new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0], blockToken, clientname, targets, targetStorageTypes, srcDataNode, stage, pipelineSize, minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum, cachingStrategy, allowLazyPersist, false, targetPinnings, targetStorageId, targetStorageIds);
                }
                mirrorOut.flush();
                DataNodeFaultInjector.get().writeBlockAfterFlush();
                // For client writes, wait for the downstream connect ack so a
                // bad link can be identified to the client.
                if (isClient) {
                    BlockOpResponseProto connectAck = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn));
                    mirrorInStatus = connectAck.getStatus();
                    firstBadLink = connectAck.getFirstBadLink();
                    if (mirrorInStatus != SUCCESS) {
                        LOG.debug("Datanode {} got response for connect" + "ack  from downstream datanode with firstbadlink as {}", targets.length, firstBadLink);
                    }
                }
            } catch (IOException e) {
                // Mirror setup failed. Tell the client which link is bad; for a
                // datanode-driven write, continue without the mirror.
                if (isClient) {
                    BlockOpResponseProto.newBuilder().setStatus(ERROR).setFirstBadLink(targets[0].getXferAddr()).build().writeDelimitedTo(replyOut);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (isClient) {
                    LOG.error("{}:Exception transferring block {} to mirror {}", datanode, block, mirrorNode, e);
                    throw e;
                } else {
                    LOG.info("{}:Exception transferring {} to mirror {}- continuing " + "without the mirror", datanode, block, mirrorNode, e);
                    incrDatanodeNetworkErrors();
                }
            }
        }
        // Forward the connect ack upstream (transfers ack only on close below).
        if (isClient && !isTransfer) {
            if (mirrorInStatus != SUCCESS) {
                LOG.debug("Datanode {} forwarding connect ack to upstream " + "firstbadlink is {}", targets.length, firstBadLink);
            }
            BlockOpResponseProto.newBuilder().setStatus(mirrorInStatus).setFirstBadLink(firstBadLink).build().writeDelimitedTo(replyOut);
            replyOut.flush();
        }
        // Receive the block body (null receiver means close-recovery path).
        if (blockReceiver != null) {
            String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
            blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, dataXceiverServer.getWriteThrottler(), targets, false);
            if (isTransfer) {
                LOG.trace("TRANSFER: send close-ack");
                writeResponse(SUCCESS, null, replyOut);
            }
        }
        // On close-recovery, stamp the recovered replica's final identity.
        if (isClient && stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            block.setGenerationStamp(latestGenerationStamp);
            block.setNumBytes(minBytesRcvd);
        }
        if (isDatanode || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
            datanode.closeBlock(block, null, storageUuid, isOnTransientStorage);
            LOG.info("Received {} src: {} dest: {} volume: {} of size {}", block, remoteAddress, localAddress, replica.getVolume(), block.getNumBytes());
        }
        if (isClient) {
            size = block.getNumBytes();
        }
    } catch (IOException ioe) {
        LOG.info("opWriteBlock {} received exception {}", block, ioe.toString());
        incrDatanodeNetworkErrors();
        throw ioe;
    } finally {
        // Always release the pipeline resources, even on success.
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        if (blockReceiver != null) {
            blockReceiver.releaseAnyRemainingReservedSpace();
        }
        IOUtils.closeStream(blockReceiver);
        setCurrentBlockReceiver(null);
    }
    datanode.getMetrics().addWriteBlockOp(elapsed());
    datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
}
167064.7225267hadoop
/**
 * Handles HTTP PUT operations for the HttpFS REST API, dispatching on the
 * {@code op} query parameter to the matching filesystem operation.
 *
 * <p>Rejects all requests with 403 FORBIDDEN when the server is in read-only
 * mode. Each operation is executed as the authenticated caller and recorded in
 * the audit log.
 *
 * @param is request body stream (consumed only by CREATE with data)
 * @param uriInfo request URI, used to build CREATE redirect/location URLs
 * @param path target filesystem path (made absolute before use)
 * @param op the PUT operation to perform
 * @param params parsed operation parameters
 * @param request the HTTP request, used for audit logging
 * @return the HTTP response for the operation
 * @throws IOException if {@code op} is not a valid PUT operation or the
 *                     operation itself fails
 * @throws FileSystemAccessException if the filesystem cannot be accessed
 */
public Response put(InputStream is, @Context UriInfo uriInfo, @PathParam("path") String path, @QueryParam(OperationParam.NAME) OperationParam op, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException {
    if (accessMode == AccessMode.READONLY) {
        return Response.status(Response.Status.FORBIDDEN).build();
    }
    UserGroupInformation user = HttpUserGroupInformation.get();
    Response response;
    path = makeAbsolute(path);
    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
    MDC.put("hostname", request.getRemoteAddr());
    switch(op.value()) {
        case CREATE:
            {
                // Two-step create: without data the client is redirected (or,
                // with noredirect, given the upload URL); with data the file is
                // written directly.
                Boolean hasData = params.get(DataParam.NAME, DataParam.class);
                URI redirectURL = createUploadRedirectionURL(uriInfo, HttpFSFileSystem.Operation.CREATE);
                Boolean noRedirect = params.get(NoRedirectParam.NAME, NoRedirectParam.class);
                if (noRedirect) {
                    final String js = JsonUtil.toJsonString("Location", redirectURL);
                    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                } else if (hasData) {
                    Short permission = params.get(PermissionParam.NAME, PermissionParam.class);
                    Short unmaskedPermission = params.get(UnmaskedPermissionParam.NAME, UnmaskedPermissionParam.class);
                    Boolean override = params.get(OverwriteParam.NAME, OverwriteParam.class);
                    Short replication = params.get(ReplicationParam.NAME, ReplicationParam.class);
                    Long blockSize = params.get(BlockSizeParam.NAME, BlockSizeParam.class);
                    FSOperations.FSCreate command = new FSOperations.FSCreate(is, path, permission, override, replication, blockSize, unmaskedPermission);
                    fsExecute(user, command);
                    // Use SLF4J varargs directly rather than an explicit Object[].
                    AUDIT_LOG.info("[{}] permission [{}] override [{}] " + "replication [{}] blockSize [{}] unmaskedpermission [{}]", path, permission, override, replication, blockSize, unmaskedPermission);
                    final String js = JsonUtil.toJsonString("Location", uriInfo.getAbsolutePath());
                    response = Response.created(uriInfo.getAbsolutePath()).type(MediaType.APPLICATION_JSON).entity(js).build();
                } else {
                    response = Response.temporaryRedirect(redirectURL).build();
                }
                break;
            }
        case ALLOWSNAPSHOT:
            {
                FSOperations.FSAllowSnapshot command = new FSOperations.FSAllowSnapshot(path);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] allowed snapshot", path);
                response = Response.ok().build();
                break;
            }
        case DISALLOWSNAPSHOT:
            {
                FSOperations.FSDisallowSnapshot command = new FSOperations.FSDisallowSnapshot(path);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] disallowed snapshot", path);
                response = Response.ok().build();
                break;
            }
        case CREATESNAPSHOT:
            {
                String snapshotName = params.get(SnapshotNameParam.NAME, SnapshotNameParam.class);
                FSOperations.FSCreateSnapshot command = new FSOperations.FSCreateSnapshot(path, snapshotName);
                String json = fsExecute(user, command);
                AUDIT_LOG.info("[{}] snapshot created as [{}]", path, snapshotName);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case SETXATTR:
            {
                String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
                String xattrValue = params.get(XAttrValueParam.NAME, XAttrValueParam.class);
                EnumSet<XAttrSetFlag> flag = params.get(XAttrSetFlagParam.NAME, XAttrSetFlagParam.class);
                FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr(path, xattrName, xattrValue, flag);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] to xAttr [{}]", path, xattrName);
                response = Response.ok().build();
                break;
            }
        case RENAMESNAPSHOT:
            {
                String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, OldSnapshotNameParam.class);
                String snapshotName = params.get(SnapshotNameParam.NAME, SnapshotNameParam.class);
                FSOperations.FSRenameSnapshot command = new FSOperations.FSRenameSnapshot(path, oldSnapshotName, snapshotName);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] renamed snapshot [{}] to [{}]", path, oldSnapshotName, snapshotName);
                response = Response.ok().build();
                break;
            }
        case REMOVEXATTR:
            {
                String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
                FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(path, xattrName);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] removed xAttr [{}]", path, xattrName);
                response = Response.ok().build();
                break;
            }
        case MKDIRS:
            {
                Short permission = params.get(PermissionParam.NAME, PermissionParam.class);
                Short unmaskedPermission = params.get(UnmaskedPermissionParam.NAME, UnmaskedPermissionParam.class);
                FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path, permission, unmaskedPermission);
                JSONObject json = fsExecute(user, command);
                AUDIT_LOG.info("[{}] permission [{}] unmaskedpermission [{}]", path, permission, unmaskedPermission);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case RENAME:
            {
                String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
                FSOperations.FSRename command = new FSOperations.FSRename(path, toPath);
                JSONObject json = fsExecute(user, command);
                AUDIT_LOG.info("[{}] to [{}]", path, toPath);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case SETOWNER:
            {
                String owner = params.get(OwnerParam.NAME, OwnerParam.class);
                String group = params.get(GroupParam.NAME, GroupParam.class);
                FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path, owner, group);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
                response = Response.ok().build();
                break;
            }
        case SETPERMISSION:
            {
                Short permission = params.get(PermissionParam.NAME, PermissionParam.class);
                FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path, permission);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] to [{}]", path, permission);
                response = Response.ok().build();
                break;
            }
        case SETREPLICATION:
            {
                Short replication = params.get(ReplicationParam.NAME, ReplicationParam.class);
                FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path, replication);
                JSONObject json = fsExecute(user, command);
                AUDIT_LOG.info("[{}] to [{}]", path, replication);
                response = Response.ok(json).build();
                break;
            }
        case SETTIMES:
            {
                Long modifiedTime = params.get(ModifiedTimeParam.NAME, ModifiedTimeParam.class);
                Long accessTime = params.get(AccessTimeParam.NAME, AccessTimeParam.class);
                FSOperations.FSSetTimes command = new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime + ":" + accessTime);
                response = Response.ok().build();
                break;
            }
        case SETACL:
            {
                String aclSpec = params.get(AclPermissionParam.NAME, AclPermissionParam.class);
                FSOperations.FSSetAcl command = new FSOperations.FSSetAcl(path, aclSpec);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec);
                response = Response.ok().build();
                break;
            }
        case REMOVEACL:
            {
                FSOperations.FSRemoveAcl command = new FSOperations.FSRemoveAcl(path);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] removed acl", path);
                response = Response.ok().build();
                break;
            }
        case MODIFYACLENTRIES:
            {
                String aclSpec = params.get(AclPermissionParam.NAME, AclPermissionParam.class);
                FSOperations.FSModifyAclEntries command = new FSOperations.FSModifyAclEntries(path, aclSpec);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec);
                response = Response.ok().build();
                break;
            }
        case REMOVEACLENTRIES:
            {
                String aclSpec = params.get(AclPermissionParam.NAME, AclPermissionParam.class);
                FSOperations.FSRemoveAclEntries command = new FSOperations.FSRemoveAclEntries(path, aclSpec);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec);
                response = Response.ok().build();
                break;
            }
        case REMOVEDEFAULTACL:
            {
                FSOperations.FSRemoveDefaultAcl command = new FSOperations.FSRemoveDefaultAcl(path);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] remove default acl", path);
                response = Response.ok().build();
                break;
            }
        case SETSTORAGEPOLICY:
            {
                String policyName = params.get(PolicyNameParam.NAME, PolicyNameParam.class);
                FSOperations.FSSetStoragePolicy command = new FSOperations.FSSetStoragePolicy(path, policyName);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] to policy [{}]", path, policyName);
                response = Response.ok().build();
                break;
            }
        case SETECPOLICY:
            {
                String policyName = params.get(ECPolicyParam.NAME, ECPolicyParam.class);
                FSOperations.FSSetErasureCodingPolicy command = new FSOperations.FSSetErasureCodingPolicy(path, policyName);
                fsExecute(user, command);
                AUDIT_LOG.info("[{}] to policy [{}]", path, policyName);
                response = Response.ok().build();
                break;
            }
        case SATISFYSTORAGEPOLICY:
            {
                FSOperations.FSSatisyStoragePolicy command = new FSOperations.FSSatisyStoragePolicy(path);
                fsExecute(user, command);
                AUDIT_LOG.info("satisfy storage policy for [{}]", path);
                response = Response.ok().build();
                break;
            }
        default:
            {
                // The enum may include GET/POST/DELETE operations; anything not
                // handled above is not a valid PUT.
                throw new IOException(MessageFormat.format("Invalid HTTP PUT operation [{0}]", op.value()));
            }
    }
    return response;
}
168612.481360kafka
/**
 * End-to-end walkthrough of the consumer group reconciliation state machine.
 *
 * Starting from a STABLE two-member group at epoch 10, a third member joins,
 * triggering a new target assignment that moves partitions from members 1 and 2
 * to member 3. The test then drives each member through its heartbeat-based
 * transitions (UNREVOKED_PARTITIONS while it must give up partitions,
 * UNRELEASED_PARTITIONS while it waits for partitions still owned by others)
 * until all three members converge to STABLE at epoch 11.
 */
public void testReconciliationProcess() {
    String groupId = "fooup";
    // Use a static member id as the default member id is not stable across runs.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    String memberId3 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Initial state: a STABLE group at assignment epoch 10.
    // Member 1 owns foo[0,1,2] + bar[0,1]; member 2 owns foo[3,4,5] + bar[2].
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 6).addTopic(barTopicId, barTopicName, 3).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withMember(new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).build()).withMember(new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build()).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).withAssignmentEpoch(10)).build();
    // Target assignment the assignor will produce once member 3 joins:
    //   member 1 -> foo[0,1] + bar[0]   (must revoke foo 2 and bar 1)
    //   member 2 -> foo[2,3] + bar[2]   (must revoke foo 4,5; gains foo 2)
    //   member 3 -> foo[4,5] + bar[1]   (all partitions currently owned by others)
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(barTopicId, 0))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 2, 3), mkTopicAssignment(barTopicId, 2))));
            put(memberId3, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 4, 5), mkTopicAssignment(barTopicId, 1))));
        }
    }));
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result;
    // Sanity check: everyone (and the group) starts STABLE.
    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId1));
    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.STABLE, context.consumerGroupState(groupId));
    // Member 3 joins (epoch 0). It is bumped to the new group epoch 11 but gets
    // an empty assignment: all of its target partitions are still owned by
    // members 1 and 2, so it enters UNRELEASED_PARTITIONS and the group starts
    // RECONCILING.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setMemberEpoch(0).setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignor("range").setTopicPartitions(Collections.emptyList()));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId3).setMemberEpoch(11).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()), result.response());
    assertRecordEquals(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3).setState(MemberState.UNRELEASED_PARTITIONS).setMemberEpoch(11).setPreviousMemberEpoch(0).build()), result.records().get(result.records().size() - 1));
    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 1 heartbeats at epoch 10: it is asked to shrink to foo[0,1]+bar[0]
    // (i.e. revoke foo 2 and bar 1). It stays at epoch 10 in
    // UNREVOKED_PARTITIONS until it confirms the revocation.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId1).setMemberEpoch(10));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId1).setMemberEpoch(10).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(0, 1)), new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(0))))), result.response());
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId1).setState(MemberState.UNREVOKED_PARTITIONS).setMemberEpoch(10).setPreviousMemberEpoch(10).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(barTopicId, 0))).setPartitionsPendingRevocation(mkAssignment(mkTopicAssignment(fooTopicId, 2), mkTopicAssignment(barTopicId, 1))).build())), result.records());
    assertEquals(MemberState.UNREVOKED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId1));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 2 heartbeats at epoch 10: asked to keep foo[3]+bar[2] and revoke
    // foo 4 and 5. Likewise stays at epoch 10 in UNREVOKED_PARTITIONS.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(10));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId2).setMemberEpoch(10).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Collections.singletonList(3)), new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(2))))), result.response());
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId2).setState(MemberState.UNREVOKED_PARTITIONS).setMemberEpoch(10).setPreviousMemberEpoch(10).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3), mkTopicAssignment(barTopicId, 2))).setPartitionsPendingRevocation(mkAssignment(mkTopicAssignment(fooTopicId, 4, 5))).build())), result.records());
    assertEquals(MemberState.UNREVOKED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 3 heartbeats at epoch 11: nothing has been released yet, so it
    // receives no assignment; only its previous member epoch is bumped to 11.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setMemberEpoch(11));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId3).setMemberEpoch(11).setHeartbeatIntervalMs(5000), result.response());
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3).setState(MemberState.UNRELEASED_PARTITIONS).setMemberEpoch(11).setPreviousMemberEpoch(11).build())), result.records());
    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 1 confirms the revocation by reporting exactly its target owned
    // partitions (foo[0,1]+bar[0]): it advances to epoch 11 and becomes STABLE.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId1).setMemberEpoch(10).setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(0, 1)), new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(0)))));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId1).setMemberEpoch(11).setHeartbeatIntervalMs(5000), result.response());
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(11).setPreviousMemberEpoch(10).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(barTopicId, 0))).build())), result.records());
    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId1));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 2 heartbeats without reporting owned partitions: no state change,
    // no records written.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(10));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId2).setMemberEpoch(10).setHeartbeatIntervalMs(5000), result.response());
    assertEquals(Collections.emptyList(), result.records());
    assertEquals(MemberState.UNREVOKED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 3 heartbeats: bar[1] — released by member 1 — is now assigned to
    // it, but it remains UNRELEASED_PARTITIONS while foo[4,5] are still held
    // by member 2.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setMemberEpoch(11));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId3).setMemberEpoch(11).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(1))))), result.response());
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3).setState(MemberState.UNRELEASED_PARTITIONS).setMemberEpoch(11).setPreviousMemberEpoch(11).setAssignedPartitions(mkAssignment(mkTopicAssignment(barTopicId, 1))).build())), result.records());
    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // An idempotent heartbeat from member 3: same response, no new records.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setMemberEpoch(11));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId3).setMemberEpoch(11).setHeartbeatIntervalMs(5000), result.response());
    assertEquals(Collections.emptyList(), result.records());
    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 2 confirms its revocation (reports foo[3]+bar[2]): it advances to
    // epoch 11, becomes STABLE, and picks up foo[2] released by member 1.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(10).setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Collections.singletonList(3)), new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(2)))));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId2).setMemberEpoch(11).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(2, 3)), new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(2))))), result.response());
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(11).setPreviousMemberEpoch(10).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 2, 3), mkTopicAssignment(barTopicId, 2))).build())), result.records());
    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));
    // Member 3 heartbeats reporting bar[1]: with member 2's revocation done,
    // it finally receives foo[4,5]+bar[1] and becomes STABLE. The whole group
    // is now STABLE at epoch 11.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setMemberEpoch(11).setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(1)))));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId3).setMemberEpoch(11).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(4, 5)), new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(1))))), result.response());
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3).setState(MemberState.STABLE).setMemberEpoch(11).setPreviousMemberEpoch(11).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 4, 5), mkTopicAssignment(barTopicId, 1))).build())), result.records());
    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.STABLE, context.consumerGroupState(groupId));
}
164944.4346237spring-framework
/**
 * Serializes the class built with this writer into the JVMS ClassFile byte
 * layout and returns it as a byte array.
 *
 * <p>The method runs in two passes over the same data, and the order matters:
 * the first pass computes the total file size and, as a side effect, registers
 * in the symbol table every constant pool entry (attribute names, etc.) that
 * the second pass will reference. Only after all entries are registered is the
 * constant pool length read and the output buffer allocated; the second pass
 * then writes the header, constant pool, fields, methods and class attributes
 * in classfile order. Reordering statements between the passes would corrupt
 * the output.
 *
 * @return the JVMS ClassFile content of the class.
 * @throws ClassTooLargeException if the constant pool exceeds the 16-bit
 *         classfile limit of 65535 entries.
 */
public byte[] toByteArray() {
    // ---- First pass: size computation + constant pool registration. ----
    // 24 bytes of fixed header fields plus 2 bytes per interface index.
    int size = 24 + 2 * interfaceCount;
    int fieldsCount = 0;
    FieldWriter fieldWriter = firstField;
    // Fields and methods are kept as singly linked lists threaded through
    // the fv/mv delegate references.
    while (fieldWriter != null) {
        ++fieldsCount;
        size += fieldWriter.computeFieldInfoSize();
        fieldWriter = (FieldWriter) fieldWriter.fv;
    }
    int methodsCount = 0;
    MethodWriter methodWriter = firstMethod;
    while (methodWriter != null) {
        ++methodsCount;
        size += methodWriter.computeMethodInfoSize();
        methodWriter = (MethodWriter) methodWriter.mv;
    }
    // Each optional class attribute contributes its size and registers its
    // name in the constant pool (addConstantUtf8) so that the second pass
    // can reference it.
    int attributesCount = 0;
    if (innerClasses != null) {
        ++attributesCount;
        size += 8 + innerClasses.length;
        symbolTable.addConstantUtf8(Constants.INNER_CLASSES);
    }
    if (enclosingClassIndex != 0) {
        ++attributesCount;
        size += 10;
        symbolTable.addConstantUtf8(Constants.ENCLOSING_METHOD);
    }
    // Before V1_5 the ACC_SYNTHETIC flag did not exist in the classfile and
    // is encoded as a Synthetic attribute instead (the flag itself is masked
    // out of access_flags in the second pass).
    if ((accessFlags & Opcodes.ACC_SYNTHETIC) != 0 && (version & 0xFFFF) < Opcodes.V1_5) {
        ++attributesCount;
        size += 6;
        symbolTable.addConstantUtf8(Constants.SYNTHETIC);
    }
    if (signatureIndex != 0) {
        ++attributesCount;
        size += 8;
        symbolTable.addConstantUtf8(Constants.SIGNATURE);
    }
    if (sourceFileIndex != 0) {
        ++attributesCount;
        size += 8;
        symbolTable.addConstantUtf8(Constants.SOURCE_FILE);
    }
    if (debugExtension != null) {
        ++attributesCount;
        size += 6 + debugExtension.length;
        symbolTable.addConstantUtf8(Constants.SOURCE_DEBUG_EXTENSION);
    }
    if ((accessFlags & Opcodes.ACC_DEPRECATED) != 0) {
        ++attributesCount;
        size += 6;
        symbolTable.addConstantUtf8(Constants.DEPRECATED);
    }
    if (lastRuntimeVisibleAnnotation != null) {
        ++attributesCount;
        size += lastRuntimeVisibleAnnotation.computeAnnotationsSize(Constants.RUNTIME_VISIBLE_ANNOTATIONS);
    }
    if (lastRuntimeInvisibleAnnotation != null) {
        ++attributesCount;
        size += lastRuntimeInvisibleAnnotation.computeAnnotationsSize(Constants.RUNTIME_INVISIBLE_ANNOTATIONS);
    }
    if (lastRuntimeVisibleTypeAnnotation != null) {
        ++attributesCount;
        size += lastRuntimeVisibleTypeAnnotation.computeAnnotationsSize(Constants.RUNTIME_VISIBLE_TYPE_ANNOTATIONS);
    }
    if (lastRuntimeInvisibleTypeAnnotation != null) {
        ++attributesCount;
        size += lastRuntimeInvisibleTypeAnnotation.computeAnnotationsSize(Constants.RUNTIME_INVISIBLE_TYPE_ANNOTATIONS);
    }
    if (symbolTable.computeBootstrapMethodsSize() > 0) {
        ++attributesCount;
        size += symbolTable.computeBootstrapMethodsSize();
    }
    // A module writer may contribute several attributes (Module,
    // ModulePackages, ModuleMainClass), hence the getAttributeCount() call
    // instead of a fixed increment.
    if (moduleWriter != null) {
        attributesCount += moduleWriter.getAttributeCount();
        size += moduleWriter.computeAttributesSize();
    }
    if (nestHostClassIndex != 0) {
        ++attributesCount;
        size += 8;
        symbolTable.addConstantUtf8(Constants.NEST_HOST);
    }
    if (nestMemberClasses != null) {
        ++attributesCount;
        size += 8 + nestMemberClasses.length;
        symbolTable.addConstantUtf8(Constants.NEST_MEMBERS);
    }
    if (permittedSubclasses != null) {
        ++attributesCount;
        size += 8 + permittedSubclasses.length;
        symbolTable.addConstantUtf8(Constants.PERMITTED_SUBCLASSES);
    }
    // The Record attribute body size is accumulated separately because it is
    // needed again when writing the attribute length in the second pass.
    int recordComponentCount = 0;
    int recordSize = 0;
    if ((accessFlags & Opcodes.ACC_RECORD) != 0 || firstRecordComponent != null) {
        RecordComponentWriter recordComponentWriter = firstRecordComponent;
        while (recordComponentWriter != null) {
            ++recordComponentCount;
            recordSize += recordComponentWriter.computeRecordComponentInfoSize();
            recordComponentWriter = (RecordComponentWriter) recordComponentWriter.delegate;
        }
        ++attributesCount;
        size += 8 + recordSize;
        symbolTable.addConstantUtf8(Constants.RECORD);
    }
    // Non-standard (user-defined) attributes, also a linked list.
    if (firstAttribute != null) {
        attributesCount += firstAttribute.getAttributeCount();
        size += firstAttribute.computeAttributesSize(symbolTable);
    }
    // All constant pool entries are registered by now; the pool length is
    // final and can be added to the total size.
    size += symbolTable.getConstantPoolLength();
    int constantPoolCount = symbolTable.getConstantPoolCount();
    // The constant_pool_count field is an unsigned 16-bit value.
    if (constantPoolCount > 0xFFFF) {
        throw new ClassTooLargeException(symbolTable.getClassName(), constantPoolCount);
    }
    // ---- Second pass: write everything into a buffer of the exact size. ----
    ByteVector result = new ByteVector(size);
    // Classfile magic number followed by minor/major version.
    result.putInt(0xCAFEBABE).putInt(version);
    symbolTable.putConstantPool(result);
    // Strip ACC_SYNTHETIC from access_flags for pre-1.5 targets; it is
    // represented by the Synthetic attribute written below instead.
    int mask = (version & 0xFFFF) < Opcodes.V1_5 ? Opcodes.ACC_SYNTHETIC : 0;
    result.putShort(accessFlags & ~mask).putShort(thisClass).putShort(superClass);
    result.putShort(interfaceCount);
    for (int i = 0; i < interfaceCount; ++i) {
        result.putShort(interfaces[i]);
    }
    result.putShort(fieldsCount);
    fieldWriter = firstField;
    while (fieldWriter != null) {
        fieldWriter.putFieldInfo(result);
        fieldWriter = (FieldWriter) fieldWriter.fv;
    }
    result.putShort(methodsCount);
    // While writing methods, record whether any of them contains stack map
    // frames or ASM-specific pseudo-instructions; used for the final fix-up.
    boolean hasFrames = false;
    boolean hasAsmInstructions = false;
    methodWriter = firstMethod;
    while (methodWriter != null) {
        hasFrames |= methodWriter.hasFrames();
        hasAsmInstructions |= methodWriter.hasAsmInstructions();
        methodWriter.putMethodInfo(result);
        methodWriter = (MethodWriter) methodWriter.mv;
    }
    // Class attributes, in the same order as counted in the first pass.
    result.putShort(attributesCount);
    if (innerClasses != null) {
        result.putShort(symbolTable.addConstantUtf8(Constants.INNER_CLASSES)).putInt(innerClasses.length + 2).putShort(numberOfInnerClasses).putByteArray(innerClasses.data, 0, innerClasses.length);
    }
    if (enclosingClassIndex != 0) {
        result.putShort(symbolTable.addConstantUtf8(Constants.ENCLOSING_METHOD)).putInt(4).putShort(enclosingClassIndex).putShort(enclosingMethodIndex);
    }
    if ((accessFlags & Opcodes.ACC_SYNTHETIC) != 0 && (version & 0xFFFF) < Opcodes.V1_5) {
        result.putShort(symbolTable.addConstantUtf8(Constants.SYNTHETIC)).putInt(0);
    }
    if (signatureIndex != 0) {
        result.putShort(symbolTable.addConstantUtf8(Constants.SIGNATURE)).putInt(2).putShort(signatureIndex);
    }
    if (sourceFileIndex != 0) {
        result.putShort(symbolTable.addConstantUtf8(Constants.SOURCE_FILE)).putInt(2).putShort(sourceFileIndex);
    }
    if (debugExtension != null) {
        int length = debugExtension.length;
        result.putShort(symbolTable.addConstantUtf8(Constants.SOURCE_DEBUG_EXTENSION)).putInt(length).putByteArray(debugExtension.data, 0, length);
    }
    if ((accessFlags & Opcodes.ACC_DEPRECATED) != 0) {
        result.putShort(symbolTable.addConstantUtf8(Constants.DEPRECATED)).putInt(0);
    }
    AnnotationWriter.putAnnotations(symbolTable, lastRuntimeVisibleAnnotation, lastRuntimeInvisibleAnnotation, lastRuntimeVisibleTypeAnnotation, lastRuntimeInvisibleTypeAnnotation, result);
    symbolTable.putBootstrapMethods(result);
    if (moduleWriter != null) {
        moduleWriter.putAttributes(result);
    }
    if (nestHostClassIndex != 0) {
        result.putShort(symbolTable.addConstantUtf8(Constants.NEST_HOST)).putInt(2).putShort(nestHostClassIndex);
    }
    if (nestMemberClasses != null) {
        result.putShort(symbolTable.addConstantUtf8(Constants.NEST_MEMBERS)).putInt(nestMemberClasses.length + 2).putShort(numberOfNestMemberClasses).putByteArray(nestMemberClasses.data, 0, nestMemberClasses.length);
    }
    if (permittedSubclasses != null) {
        result.putShort(symbolTable.addConstantUtf8(Constants.PERMITTED_SUBCLASSES)).putInt(permittedSubclasses.length + 2).putShort(numberOfPermittedSubclasses).putByteArray(permittedSubclasses.data, 0, permittedSubclasses.length);
    }
    if ((accessFlags & Opcodes.ACC_RECORD) != 0 || firstRecordComponent != null) {
        result.putShort(symbolTable.addConstantUtf8(Constants.RECORD)).putInt(recordSize + 2).putShort(recordComponentCount);
        RecordComponentWriter recordComponentWriter = firstRecordComponent;
        while (recordComponentWriter != null) {
            recordComponentWriter.putRecordComponentInfo(result);
            recordComponentWriter = (RecordComponentWriter) recordComponentWriter.delegate;
        }
    }
    if (firstAttribute != null) {
        firstAttribute.putAttributes(symbolTable, result);
    }
    // If any method contains ASM pseudo-instructions, the produced bytes must
    // be post-processed to replace them before being returned.
    if (hasAsmInstructions) {
        return replaceAsmInstructions(result.data, hasFrames);
    } else {
        return result.data;
    }
}
165040.8250218wildfly
private void processDeployment(final WarMetaData warMetaData, final DeploymentUnit deploymentUnit, final RequirementServiceTarget serviceTarget, final String deploymentName, final String hostName, final String serverInstanceName, final boolean isDefaultWebModule) throws DeploymentUnitProcessingException {
    ResourceRoot deploymentResourceRoot = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_ROOT);
    final VirtualFile deploymentRoot = deploymentResourceRoot.getRoot();
    final Module module = deploymentUnit.getAttachment(Attachments.MODULE);
    if (module == null) {
        throw new DeploymentUnitProcessingException(UndertowLogger.ROOT_LOGGER.failedToResolveModule(deploymentUnit));
    }
    final JBossWebMetaData metaData = warMetaData.getMergedJBossWebMetaData();
    final List<SetupAction> setupActions = deploymentUnit.getAttachmentList(org.jboss.as.ee.component.Attachments.WEB_SETUP_ACTIONS);
    CapabilityServiceSupport capabilitySupport = deploymentUnit.getAttachment(Attachments.CAPABILITY_SERVICE_SUPPORT);
    ScisMetaData scisMetaData = deploymentUnit.getAttachment(ScisMetaData.ATTACHMENT_KEY);
    final Set<ServiceName> dependentComponents = new HashSet<>();
    final List<ServiceName> components = deploymentUnit.getAttachmentList(WebComponentDescription.WEB_COMPONENTS);
    final Set<ServiceName> failed = deploymentUnit.getAttachment(org.jboss.as.ee.component.Attachments.FAILED_COMPONENTS);
    for (final ServiceName component : components) {
        if (!failed.contains(component)) {
            dependentComponents.add(component);
        }
    }
    String servletContainerName = Optional.ofNullable(metaData.getServletContainerName()).orElse(this.defaultContainer);
    final boolean componentRegistryExists = deploymentUnit.getAttachment(org.jboss.as.ee.component.Attachments.COMPONENT_REGISTRY) != null;
    final ComponentRegistry componentRegistry = componentRegistryExists ? deploymentUnit.getAttachment(org.jboss.as.ee.component.Attachments.COMPONENT_REGISTRY) : new ComponentRegistry(null);
    final ClassLoader loader = module.getClassLoader();
    final WebInjectionContainer injectionContainer = (metaData.getDistributable() == null) ? new CachingWebInjectionContainer(loader, componentRegistry) : new SimpleWebInjectionContainer(loader, componentRegistry);
    DeploymentUnit parentDeploymentUnit = deploymentUnit.getParent();
    String jaccContextId = metaData.getJaccContextID();
    if (jaccContextId == null) {
        jaccContextId = deploymentUnit.getName();
    }
    if (parentDeploymentUnit != null) {
        jaccContextId = parentDeploymentUnit.getName() + "!" + jaccContextId;
    }
    String pathName = pathNameOfDeployment(deploymentUnit, metaData, isDefaultWebModule);
    final Set<ServiceName> additionalDependencies = new HashSet<>();
    for (final SetupAction setupAction : setupActions) {
        Set<ServiceName> dependencies = setupAction.dependencies();
        if (dependencies != null) {
            additionalDependencies.addAll(dependencies);
        }
    }
    if (!deploymentResourceRoot.isUsePhysicalCodeSource()) {
        try {
            deploymentUnit.addToAttachmentList(ServletContextAttribute.ATTACHMENT_KEY, new ServletContextAttribute(Constants.CODE_SOURCE_ATTRIBUTE_NAME, deploymentRoot.toURL()));
        } catch (MalformedURLException e) {
            throw new DeploymentUnitProcessingException(e);
        }
    }
    deploymentUnit.addToAttachmentList(ServletContextAttribute.ATTACHMENT_KEY, new ServletContextAttribute(Constants.PERMISSION_COLLECTION_ATTRIBUTE_NAME, deploymentUnit.getAttachment(Attachments.MODULE_PERMISSIONS)));
    additionalDependencies.addAll(warMetaData.getAdditionalDependencies());
    final ServiceName hostServiceName = capabilitySupport.getCapabilityServiceName(Capabilities.CAPABILITY_HOST, serverInstanceName, hostName);
    final ServiceName legacyDeploymentServiceName = UndertowService.deploymentServiceName(serverInstanceName, hostName, pathName);
    final ServiceName deploymentServiceName = UndertowService.deploymentServiceName(deploymentUnit.getServiceName());
    StartupCountdown countDown = deploymentUnit.getAttachment(STARTUP_COUNTDOWN);
    if (countDown != null) {
        deploymentUnit.addToAttachmentList(UndertowAttachments.UNDERTOW_INITIAL_HANDLER_CHAIN_WRAPPERS, handler -> new ComponentStartupCountdownHandler(handler, countDown));
    }
    String securityDomainName = deploymentUnit.getAttachment(UndertowAttachments.RESOLVED_SECURITY_DOMAIN);
    TldsMetaData tldsMetaData = deploymentUnit.getAttachment(TldsMetaData.ATTACHMENT_KEY);
    final ServiceName deploymentInfoServiceName = deploymentServiceName.append(UndertowDeploymentInfoService.SERVICE_NAME);
    final ServiceName legacyDeploymentInfoServiceName = legacyDeploymentServiceName.append(UndertowDeploymentInfoService.SERVICE_NAME);
    final RequirementServiceBuilder<?> builder = serviceTarget.addService();
    final Consumer<DeploymentInfo> deploymentInfo = builder.provides(deploymentInfoServiceName, legacyDeploymentInfoServiceName);
    final Supplier<UndertowService> undertowService = builder.requires(capabilitySupport.getCapabilityServiceName(Capabilities.CAPABILITY_UNDERTOW));
    final Supplier<ServletContainerService> servletContainerService = builder.requires(capabilitySupport.getCapabilityServiceName(Capabilities.CAPABILITY_SERVLET_CONTAINER, servletContainerName));
    final Supplier<ComponentRegistry> componentRegistryDependency = componentRegistryExists ? builder.requires(ComponentRegistry.serviceName(deploymentUnit)) : Functions.constantSupplier(componentRegistry);
    final Supplier<Host> host = builder.requires(hostServiceName);
    final Supplier<SuspendController> suspendController = builder.requires(capabilitySupport.getCapabilityServiceName(Capabilities.REF_SUSPEND_CONTROLLER));
    final Supplier<ServerEnvironment> serverEnvironment = builder.requires(ServerEnvironment.SERVICE_DESCRIPTOR);
    Supplier<SecurityDomain> securityDomain = null;
    Supplier<HttpServerAuthenticationMechanismFactory> mechanismFactorySupplier = null;
    Supplier<BiFunction<DeploymentInfo, Function<String, RunAsIdentityMetaData>, Registration>> applySecurityFunction = null;
    for (final ServiceName additionalDependency : additionalDependencies) {
        builder.requires(additionalDependency);
    }
    final SecurityMetaData securityMetaData = deploymentUnit.getAttachment(ATTACHMENT_KEY);
    if (isVirtualDomainRequired(deploymentUnit) || isVirtualMechanismFactoryRequired(deploymentUnit)) {
        securityDomain = builder.requires(securityMetaData.getSecurityDomain());
    } else if (securityDomainName != null) {
        if (mappedSecurityDomain.test(securityDomainName)) {
            applySecurityFunction = builder.requires(capabilitySupport.getCapabilityServiceName(Capabilities.CAPABILITY_APPLICATION_SECURITY_DOMAIN, securityDomainName));
        } else {
            throw ROOT_LOGGER.deploymentConfiguredForLegacySecurity();
        }
    }
    if (isVirtualMechanismFactoryRequired(deploymentUnit)) {
        if (securityMetaData instanceof AdvancedSecurityMetaData) {
            mechanismFactorySupplier = builder.requires(((AdvancedSecurityMetaData) securityMetaData).getHttpServerAuthenticationMechanismFactory());
        }
    }
    Supplier<ControlPoint> controlPoint = RequestControllerActivationMarker.isRequestControllerEnabled(deploymentUnit) ? builder.requires(ControlPointService.serviceName(Optional.ofNullable(parentDeploymentUnit).orElse(deploymentUnit).getName(), UndertowExtension.SUBSYSTEM_NAME)) : null;
    SharedSessionManagerConfig sharedSessionManagerConfig = parentDeploymentUnit != null ? parentDeploymentUnit.getAttachment(SharedSessionManagerConfig.ATTACHMENT_KEY) : null;
    ServiceName sessionManagerFactoryServiceName = (sharedSessionManagerConfig != null) ? parentDeploymentUnit.getServiceName().append(SharedSessionManagerConfig.SHARED_SESSION_MANAGER_SERVICE_NAME) : deploymentServiceName.append("session");
    ServiceName sessionConfigWrapperFactoryServiceName = (sharedSessionManagerConfig != null) ? parentDeploymentUnit.getServiceName().append(SharedSessionManagerConfig.SHARED_SESSION_AFFINITY_SERVICE_NAME) : deploymentServiceName.append("affinity");
    ServletContainerService servletContainer = deploymentUnit.getAttachment(UndertowAttachments.SERVLET_CONTAINER_SERVICE);
    Supplier<SessionManagerFactory> sessionManagerFactory = (servletContainer != null) ? builder.requires(sessionManagerFactoryServiceName) : null;
    Supplier<Function<CookieConfig, SessionConfigWrapper>> sessionConfigWrapperFactory = (servletContainer != null) ? builder.requires(sessionConfigWrapperFactoryServiceName) : null;
    if ((servletContainer != null) && (sharedSessionManagerConfig == null)) {
        Integer maxActiveSessions = (metaData.getMaxActiveSessions() != null) ? metaData.getMaxActiveSessions() : servletContainer.getMaxSessions();
        SessionConfigMetaData sessionConfig = metaData.getSessionConfig();
        int defaultSessionTimeout = ((sessionConfig != null) && sessionConfig.getSessionTimeoutSet()) ? sessionConfig.getSessionTimeout() : servletContainer.getDefaultSessionTimeout();
        SessionManagementProvider provider = this.getDistributableWebDeploymentProvider(deploymentUnit, metaData);
        SessionManagerFactoryConfiguration configuration = new SessionManagerFactoryConfiguration() {

            @Override
            public String getServerName() {
                return serverInstanceName;
            }

            @Override
            public String getDeploymentName() {
                return deploymentName;
            }

            @Override
            public DeploymentUnit getDeploymentUnit() {
                return deploymentUnit;
            }

            @Override
            public Integer getMaxActiveSessions() {
                return (maxActiveSessions != null) && (maxActiveSessions > 0) ? maxActiveSessions : null;
            }

            @Override
            public Duration getDefaultSessionTimeout() {
                return Duration.ofMinutes(defaultSessionTimeout);
            }
        };
        for (CapabilityServiceConfigurator configurator : provider.getSessionManagerFactoryServiceConfigurators(sessionManagerFactoryServiceName, configuration)) {
            configurator.configure(capabilitySupport).build(serviceTarget).install();
        }
        for (CapabilityServiceConfigurator configurator : provider.getSessionAffinityServiceConfigurators(sessionConfigWrapperFactoryServiceName, configuration)) {
            configurator.configure(capabilitySupport).build(serviceTarget).install();
        }
    }
    UndertowDeploymentInfoService undertowDeploymentInfoService = UndertowDeploymentInfoService.builder().setAttributes(deploymentUnit.getAttachmentList(ServletContextAttribute.ATTACHMENT_KEY)).setContextPath(pathName).setDeploymentName(deploymentName).setDeploymentRoot(deploymentRoot).setMergedMetaData(warMetaData.getMergedJBossWebMetaData()).setModule(module).setScisMetaData(scisMetaData).setJaccContextId(jaccContextId).setSecurityDomain(securityDomainName).setTldInfo(createTldsInfo(tldsMetaData, tldsMetaData == null ? null : tldsMetaData.getSharedTlds(deploymentUnit))).setSetupActions(setupActions).setSharedSessionManagerConfig(sharedSessionManagerConfig).setOverlays(warMetaData.getOverlays()).setExpressionFactoryWrappers(deploymentUnit.getAttachmentList(ExpressionFactoryWrapper.ATTACHMENT_KEY)).setPredicatedHandlers(deploymentUnit.getAttachment(UndertowHandlersDeploymentProcessor.PREDICATED_HANDLERS)).setInitialHandlerChainWrappers(deploymentUnit.getAttachmentList(UndertowAttachments.UNDERTOW_INITIAL_HANDLER_CHAIN_WRAPPERS)).setInnerHandlerChainWrappers(deploymentUnit.getAttachmentList(UndertowAttachments.UNDERTOW_INNER_HANDLER_CHAIN_WRAPPERS)).setOuterHandlerChainWrappers(deploymentUnit.getAttachmentList(UndertowAttachments.UNDERTOW_OUTER_HANDLER_CHAIN_WRAPPERS)).setThreadSetupActions(deploymentUnit.getAttachmentList(UndertowAttachments.UNDERTOW_THREAD_SETUP_ACTIONS)).setServletExtensions(deploymentUnit.getAttachmentList(UndertowAttachments.UNDERTOW_SERVLET_EXTENSIONS)).setExplodedDeployment(ExplodedDeploymentMarker.isExplodedDeployment(deploymentUnit)).setWebSocketDeploymentInfo(deploymentUnit.getAttachment(UndertowAttachments.WEB_SOCKET_DEPLOYMENT_INFO)).setTempDir(warMetaData.getTempDir()).setExternalResources(deploymentUnit.getAttachmentList(UndertowAttachments.EXTERNAL_RESOURCES)).setAllowSuspendedRequests(deploymentUnit.getAttachmentList(UndertowAttachments.ALLOW_REQUEST_WHEN_SUSPENDED)).createUndertowDeploymentInfoService(deploymentInfo, 
undertowService, sessionManagerFactory, sessionConfigWrapperFactory, servletContainerService, componentRegistryDependency, host, controlPoint, suspendController, serverEnvironment, securityDomain, mechanismFactorySupplier, applySecurityFunction);
    builder.setInstance(undertowDeploymentInfoService);
    final Set<String> seenExecutors = new HashSet<String>();
    if (metaData.getExecutorName() != null) {
        final Supplier<Executor> executor = builder.requires(IOServices.WORKER.append(metaData.getExecutorName()));
        undertowDeploymentInfoService.addInjectedExecutor(metaData.getExecutorName(), executor);
        seenExecutors.add(metaData.getExecutorName());
    }
    if (metaData.getServlets() != null) {
        for (JBossServletMetaData servlet : metaData.getServlets()) {
            if (servlet.getExecutorName() != null && !seenExecutors.contains(servlet.getExecutorName())) {
                final Supplier<Executor> executor = builder.requires(IOServices.WORKER.append(servlet.getExecutorName()));
                undertowDeploymentInfoService.addInjectedExecutor(servlet.getExecutorName(), executor);
                seenExecutors.add(servlet.getExecutorName());
            }
        }
    }
    try {
        builder.install();
    } catch (DuplicateServiceException e) {
        throw UndertowLogger.ROOT_LOGGER.duplicateHostContextDeployments(deploymentInfoServiceName, e.getMessage());
    }
    final ServiceBuilder<?> udsBuilder = serviceTarget.addService(deploymentServiceName);
    final Consumer<UndertowDeploymentService> sConsumer = udsBuilder.provides(deploymentServiceName, legacyDeploymentServiceName);
    final Supplier<ServletContainerService> cSupplier = udsBuilder.requires(UndertowService.SERVLET_CONTAINER.append(defaultContainer));
    final Supplier<ExecutorService> seSupplier = Services.requireServerExecutor(udsBuilder);
    final Supplier<Host> hSupplier = udsBuilder.requires(hostServiceName);
    final Supplier<DeploymentInfo> diSupplier = udsBuilder.requires(deploymentInfoServiceName);
    for (final ServiceName webDependency : deploymentUnit.getAttachmentList(Attachments.WEB_DEPENDENCIES)) {
        udsBuilder.requires(webDependency);
    }
    for (final ServiceName dependentComponent : dependentComponents) {
        udsBuilder.requires(dependentComponent);
    }
    udsBuilder.setInstance(new UndertowDeploymentService(sConsumer, cSupplier, seSupplier, hSupplier, diSupplier, injectionContainer, true));
    udsBuilder.install();
    deploymentUnit.addToAttachmentList(Attachments.DEPLOYMENT_COMPLETE_SERVICES, deploymentServiceName);
    final boolean elytronJacc = capabilitySupport.hasCapability(ELYTRON_JACC_CAPABILITY_NAME);
    if (elytronJacc) {
        WarJACCDeployer deployer = new WarJACCDeployer();
        JaccService<WarMetaData> jaccService = deployer.deploy(deploymentUnit, jaccContextId);
        if (jaccService != null) {
            final ServiceName jaccServiceName = deploymentUnit.getServiceName().append(JaccService.SERVICE_NAME);
            ServiceBuilder<?> jaccBuilder = serviceTarget.addService(jaccServiceName, jaccService);
            if (parentDeploymentUnit != null) {
                jaccBuilder.addDependency(parentDeploymentUnit.getServiceName().append(JaccService.SERVICE_NAME), PolicyConfiguration.class, jaccService.getParentPolicyInjector());
            }
            jaccBuilder.requires(capabilitySupport.getCapabilityServiceName(elytronJacc ? ELYTRON_JACC_CAPABILITY_NAME : LEGACY_JACC_CAPABILITY_NAME));
            jaccBuilder.requires(deploymentServiceName);
            jaccBuilder.setInitialMode(Mode.PASSIVE).install();
        }
    }
    final DeploymentResourceSupport deploymentResourceSupport = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_RESOURCE_SUPPORT);
    final ModelNode node = deploymentResourceSupport.getDeploymentSubsystemModel(UndertowExtension.SUBSYSTEM_NAME);
    node.get(DeploymentDefinition.CONTEXT_ROOT.getName()).set("".equals(pathName) ? "/" : pathName);
    node.get(DeploymentDefinition.VIRTUAL_HOST.getName()).set(hostName);
    node.get(DeploymentDefinition.SERVER.getName()).set(serverInstanceName);
    processManagement(deploymentUnit, metaData);
}
1712314.751277cassandra
/**
 * Verifies selection of literal terms (constants, collections, tuples, UDTs) in the
 * SELECT clause: untyped literals must fail with "Cannot infer type for term" until an
 * explicit type hint / cast is supplied, while hinted literals — including ones built
 * from column selectors and aggregates — produce the expected rows.
 */
public void testSelectLiteral() throws Throwable {
    // Fixed write timestamp so WRITETIME(t) is predictable in assertions below.
    long timestampInMicros = System.currentTimeMillis() * 1000;
    createTable("CREATE TABLE %s (pk int, ck int, t text, PRIMARY KEY (pk, ck) )");
    execute("INSERT INTO %s (pk, ck, t) VALUES (?, ?, ?) USING TIMESTAMP ?", 1, 1, "one", timestampInMicros);
    execute("INSERT INTO %s (pk, ck, t) VALUES (?, ?, ?) USING TIMESTAMP ?", 1, 2, "two", timestampInMicros);
    execute("INSERT INTO %s (pk, ck, t) VALUES (?, ?, ?) USING TIMESTAMP ?", 1, 3, "three", timestampInMicros);
    // Bare scalar/tuple literals are ambiguous; a cast-style type hint makes them selectable.
    assertInvalidMessage("Cannot infer type for term", "SELECT ck, t, 'a const' FROM %s");
    assertConstantResult(execute("SELECT ck, t, (text)'a const' FROM %s"), "a const");
    assertInvalidMessage("Cannot infer type for term", "SELECT ck, t, 42 FROM %s");
    assertConstantResult(execute("SELECT ck, t, (smallint)42 FROM %s"), (short) 42);
    assertInvalidMessage("Cannot infer type for term", "SELECT ck, t, (1, 'foo') FROM %s");
    assertConstantResult(execute("SELECT ck, t, (tuple<int, text>)(1, 'foo') FROM %s"), tuple(1, "foo"));
    // Nested parentheses: a hint on the inner tuple alone is not enough, the outer term needs one too.
    assertInvalidMessage("Cannot infer type for term ((1)) in selection clause", "SELECT ck, t, ((1)) FROM %s");
    assertInvalidMessage("Cannot infer type for term ((tuple<int>)(1))", "SELECT ck, t, ((tuple<int>)(1)) FROM %s");
    assertConstantResult(execute("SELECT ck, t, (tuple<tuple<int>>)((1)) FROM %s"), tuple(tuple(1)));
    // Collection literals (list/set/map) likewise need explicit type hints, including empty ones.
    assertInvalidMessage("Cannot infer type for term", "SELECT ck, t, [1, 2, 3] FROM %s");
    assertConstantResult(execute("SELECT ck, t, (list<int>)[1, 2, 3] FROM %s"), list(1, 2, 3));
    assertInvalidMessage("Cannot infer type for term", "SELECT ck, t, {1, 2, 3} FROM %s");
    assertConstantResult(execute("SELECT ck, t, (set<int>){1, 2, 3} FROM %s"), set(1, 2, 3));
    assertInvalidMessage("Cannot infer type for term", "SELECT ck, t, {1: 'foo', 2: 'bar', 3: 'baz'} FROM %s");
    assertConstantResult(execute("SELECT ck, t, (map<int, text>){1: 'foo', 2: 'bar', 3: 'baz'} FROM %s"), map(1, "foo", 2, "bar", 3, "baz"));
    assertInvalidMessage("Cannot infer type for term", "SELECT ck, t, {} FROM %s");
    assertConstantResult(execute("SELECT ck, t, (map<int, text>){} FROM %s"), map());
    assertConstantResult(execute("SELECT ck, t, (set<int>){} FROM %s"), set());
    // Result-set column names include the hint text verbatim.
    assertColumnNames(execute("SELECT ck, t, (int)42, (int)43 FROM %s"), "ck", "t", "(int)42", "(int)43");
    assertRows(execute("SELECT ck, t, (int) 42, (int) 43 FROM %s"), row(1, "one", 42, 43), row(2, "two", 42, 43), row(3, "three", 42, 43));
    // Collections built from aggregate selectors: element types are inferred from min/max.
    assertRows(execute("SELECT min(ck), max(ck), [min(ck), max(ck)] FROM %s"), row(1, 3, list(1, 3)));
    assertRows(execute("SELECT [min(ck), max(ck)] FROM %s"), row(list(1, 3)));
    assertRows(execute("SELECT {min(ck), max(ck)} FROM %s"), row(set(1, 3)));
    // A map literal over aggregates still needs a hint (keys alone don't fix the map type).
    assertInvalidMessage("Cannot infer type for term {'min': system.min(ck), 'max': system.max(ck)}", "SELECT {'min' : min(ck), 'max' : max(ck)} FROM %s");
    assertRows(execute("SELECT (map<text, int>){'min' : min(ck), 'max' : max(ck)} FROM %s"), row(map("min", 1, "max", 3)));
    assertRows(execute("SELECT [1, min(ck), max(ck)] FROM %s"), row(list(1, 1, 3)));
    assertRows(execute("SELECT {1, min(ck), max(ck)} FROM %s"), row(set(1, 1, 3)));
    assertRows(execute("SELECT (map<text, int>) {'litteral' : 1, 'min' : min(ck), 'max' : max(ck)} FROM %s"), row(map("litteral", 1, "min", 1, "max", 3)));
    // Nested lists mixing empty literals with per-row selectors (one result row per table row).
    assertRows(execute("SELECT [[], [min(ck), max(ck)]] FROM %s"), row(list(list(), list(1, 3))));
    assertRows(execute("SELECT [[], [CAST(pk AS BIGINT), CAST(ck AS BIGINT), WRITETIME(t)]] FROM %s"), row(list(list(), list(1L, 1L, timestampInMicros))), row(list(list(), list(1L, 2L, timestampInMicros))), row(list(list(), list(1L, 3L, timestampInMicros))));
    assertRows(execute("SELECT [[min(ck)], [max(ck)]] FROM %s"), row(list(list(1), list(3))));
    assertRows(execute("SELECT [[min(ck)], ([max(ck)])] FROM %s"), row(list(list(1), list(3))));
    assertRows(execute("SELECT [[pk], [ck]] FROM %s"), row(list(list(1), list(1))), row(list(list(1), list(2))), row(list(list(1), list(3))));
    assertRows(execute("SELECT [[pk], [ck]] FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(list(list(1), list(3))), row(list(list(1), list(2))), row(list(list(1), list(1))));
    // Lists of sets built from selectors.
    assertRows(execute("SELECT [{}, {min(ck), max(ck)}] FROM %s"), row(list(set(), set(1, 3))));
    assertRows(execute("SELECT [{}, {CAST(pk AS BIGINT), CAST(ck AS BIGINT), WRITETIME(t)}] FROM %s"), row(list(set(), set(1L, 1L, timestampInMicros))), row(list(set(), set(1L, 2L, timestampInMicros))), row(list(set(), set(1L, 3L, timestampInMicros))));
    assertRows(execute("SELECT [{min(ck)}, {max(ck)}] FROM %s"), row(list(set(1), set(3))));
    assertRows(execute("SELECT [{min(ck)}, ({max(ck)})] FROM %s"), row(list(set(1), set(3))));
    assertRows(execute("SELECT [{pk}, {ck}] FROM %s"), row(list(set(1), set(1))), row(list(set(1), set(2))), row(list(set(1), set(3))));
    assertRows(execute("SELECT [{pk}, {ck}] FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(list(set(1), set(3))), row(list(set(1), set(2))), row(list(set(1), set(1))));
    // Lists of maps (maps need the explicit hint inside the list literal).
    assertRows(execute("SELECT [{}, (map<text, int>){'min' : min(ck), 'max' : max(ck)}] FROM %s"), row(list(map(), map("min", 1, "max", 3))));
    assertRows(execute("SELECT [{}, (map<text, bigint>){'pk' : CAST(pk AS BIGINT), 'ck' : CAST(ck AS BIGINT), 'writetime' : WRITETIME(t)}] FROM %s"), row(list(map(), map("pk", 1L, "ck", 1L, "writetime", timestampInMicros))), row(list(map(), map("pk", 1L, "ck", 2L, "writetime", timestampInMicros))), row(list(map(), map("pk", 1L, "ck", 3L, "writetime", timestampInMicros))));
    assertRows(execute("SELECT [{}, (map<text, int>){'pk' : pk, 'ck' : ck}] FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(list(map(), map("pk", 1, "ck", 3))), row(list(map(), map("pk", 1, "ck", 2))), row(list(map(), map("pk", 1, "ck", 1))));
    // Lists of tuples built from selectors.
    assertRows(execute("SELECT [(pk, ck, WRITETIME(t))] FROM %s"), row(list(tuple(1, 1, timestampInMicros))), row(list(tuple(1, 2, timestampInMicros))), row(list(tuple(1, 3, timestampInMicros))));
    assertRows(execute("SELECT [(min(ck), max(ck))] FROM %s"), row(list(tuple(1, 3))));
    assertRows(execute("SELECT [(CAST(pk AS BIGINT), CAST(ck AS BIGINT)), (t, WRITETIME(t))] FROM %s"), row(list(tuple(1L, 1L), tuple("one", timestampInMicros))), row(list(tuple(1L, 2L), tuple("two", timestampInMicros))), row(list(tuple(1L, 3L), tuple("three", timestampInMicros))));
    // Lists of user-defined-type literals; unspecified UDT fields come back null.
    String type = createType("CREATE TYPE %s(a int, b int, c bigint)");
    assertRows(execute("SELECT [(" + type + "){a : min(ck), b: max(ck)}] FROM %s"), row(list(userType("a", 1, "b", 3, "c", null))));
    assertRows(execute("SELECT [(" + type + "){a : pk, b : ck, c : WRITETIME(t)}] FROM %s"), row(list(userType("a", 1, "b", 1, "c", timestampInMicros))), row(list(userType("a", 1, "b", 2, "c", timestampInMicros))), row(list(userType("a", 1, "b", 3, "c", timestampInMicros))));
    assertRows(execute("SELECT [(" + type + "){a : pk, b : ck, c : WRITETIME(t)}] FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(list(userType("a", 1, "b", 3, "c", timestampInMicros))), row(list(userType("a", 1, "b", 2, "c", timestampInMicros))), row(list(userType("a", 1, "b", 1, "c", timestampInMicros))));
    // Sets containing lists / sets / maps / tuples / UDTs built from selectors.
    assertRows(execute("SELECT {[], [min(ck), max(ck)]} FROM %s"), row(set(list(), list(1, 3))));
    assertRows(execute("SELECT {[], [pk, ck]} FROM %s LIMIT 2"), row(set(list(), list(1, 1))), row(set(list(), list(1, 2))));
    assertRows(execute("SELECT {[], [pk, ck]} FROM %s WHERE pk = 1 ORDER BY ck DESC LIMIT 2"), row(set(list(), list(1, 3))), row(set(list(), list(1, 2))));
    assertRows(execute("SELECT {[min(ck)], ([max(ck)])} FROM %s"), row(set(list(1), list(3))));
    assertRows(execute("SELECT {[pk], ([ck])} FROM %s"), row(set(list(1), list(1))), row(set(list(1), list(2))), row(set(list(1), list(3))));
    assertRows(execute("SELECT {([min(ck)]), [max(ck)]} FROM %s"), row(set(list(1), list(3))));
    assertRows(execute("SELECT {{}, {min(ck), max(ck)}} FROM %s"), row(set(set(), set(1, 3))));
    assertRows(execute("SELECT {{}, {pk, ck}} FROM %s LIMIT 2"), row(set(set(), set(1, 1))), row(set(set(), set(1, 2))));
    assertRows(execute("SELECT {{}, {pk, ck}} FROM %s WHERE pk = 1 ORDER BY ck DESC LIMIT 2"), row(set(set(), set(1, 3))), row(set(set(), set(1, 2))));
    assertRows(execute("SELECT {{min(ck)}, ({max(ck)})} FROM %s"), row(set(set(1), set(3))));
    assertRows(execute("SELECT {{pk}, ({ck})} FROM %s"), row(set(set(1), set(1))), row(set(set(1), set(2))), row(set(set(1), set(3))));
    assertRows(execute("SELECT {({min(ck)}), {max(ck)}} FROM %s"), row(set(set(1), set(3))));
    assertRows(execute("SELECT {{}, (map<text, int>){'min' : min(ck), 'max' : max(ck)}} FROM %s"), row(set(map(), map("min", 1, "max", 3))));
    assertRows(execute("SELECT {{}, (map<text, int>){'pk' : pk, 'ck' : ck}} FROM %s"), row(set(map(), map("pk", 1, "ck", 1))), row(set(map(), map("pk", 1, "ck", 2))), row(set(map(), map("pk", 1, "ck", 3))));
    assertRows(execute("SELECT {(pk, ck, WRITETIME(t))} FROM %s"), row(set(tuple(1, 1, timestampInMicros))), row(set(tuple(1, 2, timestampInMicros))), row(set(tuple(1, 3, timestampInMicros))));
    assertRows(execute("SELECT {(min(ck), max(ck))} FROM %s"), row(set(tuple(1, 3))));
    assertRows(execute("SELECT {(" + type + "){a : min(ck), b: max(ck)}} FROM %s"), row(set(userType("a", 1, "b", 3, "c", null))));
    assertRows(execute("SELECT {(" + type + "){a : pk, b : ck, c : WRITETIME(t)}} FROM %s"), row(set(userType("a", 1, "b", 1, "c", timestampInMicros))), row(set(userType("a", 1, "b", 2, "c", timestampInMicros))), row(set(userType("a", 1, "b", 3, "c", timestampInMicros))));
    assertRows(execute("SELECT {(" + type + "){a : pk, b : ck, c : WRITETIME(t)}} FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(set(userType("a", 1, "b", 3, "c", timestampInMicros))), row(set(userType("a", 1, "b", 2, "c", timestampInMicros))), row(set(userType("a", 1, "b", 1, "c", timestampInMicros))));
    // Maps whose keys/values are frozen collections, tuples, or UDTs built from selectors.
    assertRows(execute("SELECT (map<frozen<list<int>>, frozen<list<int>>>){[min(ck)]:[max(ck)]} FROM %s"), row(map(list(1), list(3))));
    assertRows(execute("SELECT (map<frozen<list<int>>, frozen<list<int>>>){[pk]: [ck]} FROM %s"), row(map(list(1), list(1))), row(map(list(1), list(2))), row(map(list(1), list(3))));
    assertRows(execute("SELECT (map<frozen<set<int>>, frozen<set<int>>>){{min(ck)} : {max(ck)}} FROM %s"), row(map(set(1), set(3))));
    assertRows(execute("SELECT (map<frozen<set<int>>, frozen<set<int>>>){{pk} : {ck}} FROM %s"), row(map(set(1), set(1))), row(map(set(1), set(2))), row(map(set(1), set(3))));
    assertRows(execute("SELECT (map<frozen<map<text, int>>, frozen<map<text, int>>>){{'min' : min(ck)} : {'max' : max(ck)}} FROM %s"), row(map(map("min", 1), map("max", 3))));
    assertRows(execute("SELECT (map<frozen<map<text, int>>, frozen<map<text, int>>>){{'pk' : pk} : {'ck' : ck}} FROM %s"), row(map(map("pk", 1), map("ck", 1))), row(map(map("pk", 1), map("ck", 2))), row(map(map("pk", 1), map("ck", 3))));
    assertRows(execute("SELECT (map<frozen<tuple<int, int>>, frozen<tuple<bigint>>>){(pk, ck) : (WRITETIME(t))} FROM %s"), row(map(tuple(1, 1), tuple(timestampInMicros))), row(map(tuple(1, 2), tuple(timestampInMicros))), row(map(tuple(1, 3), tuple(timestampInMicros))));
    assertRows(execute("SELECT (map<frozen<tuple<int>> , frozen<tuple<int>>>){(min(ck)) : (max(ck))} FROM %s"), row(map(tuple(1), tuple(3))));
    assertRows(execute("SELECT (map<int, frozen<" + type + ">>){ck : {a : min(ck), b: max(ck)}} FROM %s"), row(map(1, userType("a", 1, "b", 3, "c", null))));
    assertRows(execute("SELECT (map<int, frozen<" + type + ">>){ck : {a : pk, b : ck, c : WRITETIME(t)}} FROM %s"), row(map(1, userType("a", 1, "b", 1, "c", timestampInMicros))), row(map(2, userType("a", 1, "b", 2, "c", timestampInMicros))), row(map(3, userType("a", 1, "b", 3, "c", timestampInMicros))));
    assertRows(execute("SELECT (map<int, frozen<" + type + ">>){ck : {a : pk, b : ck, c : WRITETIME(t)}} FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(map(3, userType("a", 1, "b", 3, "c", timestampInMicros))), row(map(2, userType("a", 1, "b", 2, "c", timestampInMicros))), row(map(1, userType("a", 1, "b", 1, "c", timestampInMicros))));
    // Tuples whose components are collections, maps, tuples, or UDTs built from selectors.
    assertRows(execute("SELECT ([min(ck)], [max(ck)]) FROM %s"), row(tuple(list(1), list(3))));
    assertRows(execute("SELECT ([pk], [ck]) FROM %s"), row(tuple(list(1), list(1))), row(tuple(list(1), list(2))), row(tuple(list(1), list(3))));
    assertRows(execute("SELECT ({min(ck)}, {max(ck)}) FROM %s"), row(tuple(set(1), set(3))));
    assertRows(execute("SELECT ({pk}, {ck}) FROM %s"), row(tuple(set(1), set(1))), row(tuple(set(1), set(2))), row(tuple(set(1), set(3))));
    assertRows(execute("SELECT ((map<text, int>){'min' : min(ck)}, (map<text, int>){'max' : max(ck)}) FROM %s"), row(tuple(map("min", 1), map("max", 3))));
    assertRows(execute("SELECT ((map<text, int>){'pk' : pk}, (map<text, int>){'ck' : ck}) FROM %s"), row(tuple(map("pk", 1), map("ck", 1))), row(tuple(map("pk", 1), map("ck", 2))), row(tuple(map("pk", 1), map("ck", 3))));
    assertRows(execute("SELECT (tuple<tuple<int, int, bigint>>)((pk, ck, WRITETIME(t))) FROM %s"), row(tuple(tuple(1, 1, timestampInMicros))), row(tuple(tuple(1, 2, timestampInMicros))), row(tuple(tuple(1, 3, timestampInMicros))));
    assertRows(execute("SELECT (tuple<tuple<int, int, bigint>>)((min(ck), max(ck))) FROM %s"), row(tuple(tuple(1, 3))));
    assertRows(execute("SELECT ((t, WRITETIME(t)), (CAST(pk AS BIGINT), CAST(ck AS BIGINT))) FROM %s"), row(tuple(tuple("one", timestampInMicros), tuple(1L, 1L))), row(tuple(tuple("two", timestampInMicros), tuple(1L, 2L))), row(tuple(tuple("three", timestampInMicros), tuple(1L, 3L))));
    assertRows(execute("SELECT (tuple<" + type + ">)({a : min(ck), b: max(ck)}) FROM %s"), row(tuple(userType("a", 1, "b", 3, "c", null))));
    assertRows(execute("SELECT (tuple<" + type + ">)({a : pk, b : ck, c : WRITETIME(t)}) FROM %s"), row(tuple(userType("a", 1, "b", 1, "c", timestampInMicros))), row(tuple(userType("a", 1, "b", 2, "c", timestampInMicros))), row(tuple(userType("a", 1, "b", 3, "c", timestampInMicros))));
    assertRows(execute("SELECT (tuple<" + type + ">)({a : pk, b : ck, c : WRITETIME(t)}) FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(tuple(userType("a", 1, "b", 3, "c", timestampInMicros))), row(tuple(userType("a", 1, "b", 2, "c", timestampInMicros))), row(tuple(userType("a", 1, "b", 1, "c", timestampInMicros))));
    // UDT literals whose fields are themselves collections / tuples / nested UDTs.
    String containerType = createType("CREATE TYPE %s(l list<int>)");
    assertRows(execute("SELECT (" + containerType + "){l : [min(ck), max(ck)]} FROM %s"), row(userType("l", list(1, 3))));
    assertRows(execute("SELECT (" + containerType + "){l : [pk, ck]} FROM %s"), row(userType("l", list(1, 1))), row(userType("l", list(1, 2))), row(userType("l", list(1, 3))));
    containerType = createType("CREATE TYPE %s(s set<int>)");
    assertRows(execute("SELECT (" + containerType + "){s : {min(ck), max(ck)}} FROM %s"), row(userType("s", set(1, 3))));
    assertRows(execute("SELECT (" + containerType + "){s : {pk, ck}} FROM %s"), row(userType("s", set(1))), row(userType("s", set(1, 2))), row(userType("s", set(1, 3))));
    containerType = createType("CREATE TYPE %s(m map<text, int>)");
    assertRows(execute("SELECT (" + containerType + "){m : {'min' : min(ck), 'max' : max(ck)}} FROM %s"), row(userType("m", map("min", 1, "max", 3))));
    assertRows(execute("SELECT (" + containerType + "){m : {'pk' : pk, 'ck' : ck}} FROM %s"), row(userType("m", map("pk", 1, "ck", 1))), row(userType("m", map("pk", 1, "ck", 2))), row(userType("m", map("pk", 1, "ck", 3))));
    containerType = createType("CREATE TYPE %s(t tuple<int, int>, w tuple<bigint>)");
    assertRows(execute("SELECT (" + containerType + "){t : (pk, ck), w : (WRITETIME(t))} FROM %s"), row(userType("t", tuple(1, 1), "w", tuple(timestampInMicros))), row(userType("t", tuple(1, 2), "w", tuple(timestampInMicros))), row(userType("t", tuple(1, 3), "w", tuple(timestampInMicros))));
    containerType = createType("CREATE TYPE %s(t frozen<" + type + ">)");
    assertRows(execute("SELECT (" + containerType + "){t : {a : min(ck), b: max(ck)}} FROM %s"), row(userType("t", userType("a", 1, "b", 3, "c", null))));
    assertRows(execute("SELECT (" + containerType + "){t : {a : pk, b : ck, c : WRITETIME(t)}} FROM %s"), row(userType("t", userType("a", 1, "b", 1, "c", timestampInMicros))), row(userType("t", userType("a", 1, "b", 2, "c", timestampInMicros))), row(userType("t", userType("a", 1, "b", 3, "c", timestampInMicros))));
    assertRows(execute("SELECT (" + containerType + "){t : {a : pk, b : ck, c : WRITETIME(t)}} FROM %s WHERE pk = 1 ORDER BY ck DESC"), row(userType("t", userType("a", 1, "b", 3, "c", timestampInMicros))), row(userType("t", userType("a", 1, "b", 2, "c", timestampInMicros))), row(userType("t", userType("a", 1, "b", 1, "c", timestampInMicros))));
    // Invalid constructs: durations in sets, and field selection on non-UDT terms.
    assertInvalidMessage("Durations are not allowed inside sets: set<duration>", "SELECT pk, ck, (set<duration>){2d, 1mo} FROM %s");
    assertInvalidMessage("Invalid field selection: system.min(ck) of type int is not a user type", "SELECT min(ck).min FROM %s");
    assertInvalidMessage("Invalid field selection: (map<text, int>){'min': system.min(ck), 'max': system.max(ck)} of type frozen<map<text, int>> is not a user type", "SELECT (map<text, int>) {'min' : min(ck), 'max' : max(ck)}.min FROM %s");
}
1710167.631321cassandra
/**
 * Exercises the CQL {@code from_json()} function across every column type of a
 * single test table: null handling, valid JSON inputs (including the string
 * forms of numeric types), and the exact error messages produced for invalid
 * JSON values. All statements target the same row (k = 0), so the statement
 * order below is significant — later writes overwrite earlier ones.
 */
public void testFromJsonFct() throws Throwable {
    String typeName = createType("CREATE TYPE %s (a int, b uuid, c set<text>)");
    // One column per CQL type under test, including frozen variants, a tuple and a UDT.
    createTable("CREATE TABLE %s (" + "k int PRIMARY KEY, " + "asciival ascii, " + "bigintval bigint, " + "blobval blob, " + "booleanval boolean, " + "dateval date, " + "decimalval decimal, " + "doubleval double, " + "floatval float, " + "inetval inet, " + "intval int, " + "smallintval smallint, " + "textval text, " + "timeval time, " + "timestampval timestamp, " + "timeuuidval timeuuid, " + "tinyintval tinyint, " + "uuidval uuid," + "varcharval varchar, " + "varintval varint, " + "listval list<int>, " + "frozenlistval frozen<list<int>>, " + "setval set<uuid>, " + "frozensetval frozen<set<uuid>>, " + "mapval map<ascii, int>," + "frozenmapval frozen<map<ascii, int>>," + "tupleval frozen<tuple<int, ascii, uuid>>," + "udtval frozen<" + typeName + ">," + "durationval duration)");
    // from_json() is only valid in INSERT/UPDATE value positions, never in a SELECT clause.
    assertInvalidMessage("from_json() cannot be used in the selection clause", "SELECT from_json(textval) FROM %s", 0, 0);
    // Two overloads (int / text) make a call on from_json()'s untyped result ambiguous.
    String func1 = createFunction(KEYSPACE, "int", "CREATE FUNCTION %s (a int) CALLED ON NULL INPUT RETURNS text LANGUAGE java AS $$ return a.toString(); $$");
    createFunctionOverload(func1, "int", "CREATE FUNCTION %s (a text) CALLED ON NULL INPUT RETURNS text LANGUAGE java AS $$ return new String(a); $$");
    assertInvalidMessage("Ambiguous call to function", "INSERT INTO %s (k, textval) VALUES (?, " + func1 + "(from_json(?)))", 0, "123");
    // Raw (unquoted) non-ASCII input is not valid JSON at all.
    assertInvalidMessage("Could not decode JSON string '\u038E\u0394\u03B4\u03E0'", "INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\u038E\u0394\u03B4\u03E0");
    // ---- null handling: a null bind yields a null column for simple, frozen-map and UDT columns ----
    execute("INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, null);
    assertRows(execute("SELECT k, asciival FROM %s WHERE k = ?", 0), row(0, null));
    execute("INSERT INTO %s (k, frozenmapval) VALUES (?, from_json(?))", 0, null);
    assertRows(execute("SELECT k, frozenmapval FROM %s WHERE k = ?", 0), row(0, null));
    execute("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, null);
    assertRows(execute("SELECT k, udtval FROM %s WHERE k = ?", 0), row(0, null));
    // ---- ascii ----
    execute("INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\"ascii text\"");
    assertRows(execute("SELECT k, asciival FROM %s WHERE k = ?", 0), row(0, "ascii text"));
    // JSON-escaped quote inside the string is preserved.
    execute("INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\"ascii \\\" text\"");
    assertRows(execute("SELECT k, asciival FROM %s WHERE k = ?", 0), row(0, "ascii \" text"));
    assertInvalidMessage("Invalid ASCII character in string literal", "INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\"\\u1fff\\u2013\\u33B4\\u2014\"");
    assertInvalidMessage("Expected an ascii string, but got a Integer", "INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "123");
    // from_json() can also supply the partition key in WHERE / DELETE.
    assertRows(execute("SELECT asciival FROM %s WHERE k = from_json(?)", "0"), row("ascii \" text"));
    execute("UPDATE %s SET asciival = from_json(?) WHERE k = from_json(?)", "\"ascii \\\" text\"", "0");
    execute("DELETE FROM %s WHERE k = from_json(?)", "0");
    // ---- bigint: accepts JSON number or quoted string; rejects overflow, decimals and non-numerics ----
    execute("INSERT INTO %s (k, bigintval) VALUES (?, from_json(?))", 0, "123123123123");
    assertRows(execute("SELECT k, bigintval FROM %s WHERE k = ?", 0), row(0, 123123123123L));
    execute("INSERT INTO %s (k, bigintval) VALUES (?, from_json(?))", 0, "\"123123123123\"");
    assertRows(execute("SELECT k, bigintval FROM %s WHERE k = ?", 0), row(0, 123123123123L));
    // Long.MAX_VALUE + 1 overflows.
    assertInvalidMessage("Expected a bigint value, but got a", "INSERT INTO %s (k, bigintval) VALUES (?, from_json(?))", 0, "9223372036854775808");
    assertInvalidMessage("Expected a bigint value, but got a", "INSERT INTO %s (k, bigintval) VALUES (?, from_json(?))", 0, "123.456");
    assertInvalidMessage("Unable to make long from", "INSERT INTO %s (k, bigintval) VALUES (?, from_json(?))", 0, "\"abc\"");
    assertInvalidMessage("Expected a bigint value, but got a", "INSERT INTO %s (k, bigintval) VALUES (?, from_json(?))", 0, "[\"abc\"]");
    // ---- blob: hex string with mandatory 0x prefix and even digit count ----
    execute("INSERT INTO %s (k, blobval) VALUES (?, from_json(?))", 0, "\"0x00000001\"");
    assertRows(execute("SELECT k, blobval FROM %s WHERE k = ?", 0), row(0, ByteBufferUtil.bytes(1)));
    assertInvalidMessage("Value 'xyzz' is not a valid blob representation", "INSERT INTO %s (k, blobval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("String representation of blob is missing 0x prefix: 123", "INSERT INTO %s (k, blobval) VALUES (?, from_json(?))", 0, "\"123\"");
    assertInvalidMessage("Value '0x123' is not a valid blob representation", "INSERT INTO %s (k, blobval) VALUES (?, from_json(?))", 0, "\"0x123\"");
    assertInvalidMessage("Value '123' is not a valid blob representation", "INSERT INTO %s (k, blobval) VALUES (?, from_json(?))", 0, "123");
    // ---- boolean: JSON booleans and their quoted string forms ----
    execute("INSERT INTO %s (k, booleanval) VALUES (?, from_json(?))", 0, "true");
    assertRows(execute("SELECT k, booleanval FROM %s WHERE k = ?", 0), row(0, true));
    execute("INSERT INTO %s (k, booleanval) VALUES (?, from_json(?))", 0, "false");
    assertRows(execute("SELECT k, booleanval FROM %s WHERE k = ?", 0), row(0, false));
    execute("INSERT INTO %s (k, booleanval) VALUES (?, from_json(?))", 0, "\"false\"");
    assertRows(execute("SELECT k, booleanval FROM %s WHERE k = ?", 0), row(0, false));
    assertInvalidMessage("Unable to make boolean from", "INSERT INTO %s (k, booleanval) VALUES (?, from_json(?))", 0, "\"abc\"");
    assertInvalidMessage("Expected a boolean value, but got a Integer", "INSERT INTO %s (k, booleanval) VALUES (?, from_json(?))", 0, "123");
    // ---- date: must be a quoted ISO date string, not a number ----
    execute("INSERT INTO %s (k, dateval) VALUES (?, from_json(?))", 0, "\"1987-03-23\"");
    assertRows(execute("SELECT k, dateval FROM %s WHERE k = ?", 0), row(0, SimpleDateSerializer.dateStringToDays("1987-03-23")));
    assertInvalidMessage("Expected a string representation of a date", "INSERT INTO %s (k, dateval) VALUES (?, from_json(?))", 0, "123");
    assertInvalidMessage("Unable to coerce 'xyz' to a formatted date", "INSERT INTO %s (k, dateval) VALUES (?, from_json(?))", 0, "\"xyz\"");
    // ---- decimal: number, integer, quoted string and scientific notation all accepted ----
    execute("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "123123.123123");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("123123.123123")));
    execute("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("123123")));
    execute("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "\"123123.123123\"");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("123123.123123")));
    execute("INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "\"-1.23E-12\"");
    assertRows(execute("SELECT k, decimalval FROM %s WHERE k = ?", 0), row(0, new BigDecimal("-1.23E-12")));
    assertInvalidMessage("Value 'xyzz' is not a valid representation of a decimal value", "INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Value 'true' is not a valid representation of a decimal value", "INSERT INTO %s (k, decimalval) VALUES (?, from_json(?))", 0, "true");
    // ---- double ----
    execute("INSERT INTO %s (k, doubleval) VALUES (?, from_json(?))", 0, "123123.123123");
    assertRows(execute("SELECT k, doubleval FROM %s WHERE k = ?", 0), row(0, 123123.123123d));
    execute("INSERT INTO %s (k, doubleval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, doubleval FROM %s WHERE k = ?", 0), row(0, 123123.0d));
    execute("INSERT INTO %s (k, doubleval) VALUES (?, from_json(?))", 0, "\"123123\"");
    assertRows(execute("SELECT k, doubleval FROM %s WHERE k = ?", 0), row(0, 123123.0d));
    assertInvalidMessage("Unable to make double from", "INSERT INTO %s (k, doubleval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Expected a double value, but got", "INSERT INTO %s (k, doubleval) VALUES (?, from_json(?))", 0, "true");
    // ---- float ----
    execute("INSERT INTO %s (k, floatval) VALUES (?, from_json(?))", 0, "123123.123123");
    assertRows(execute("SELECT k, floatval FROM %s WHERE k = ?", 0), row(0, 123123.123123f));
    execute("INSERT INTO %s (k, floatval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, floatval FROM %s WHERE k = ?", 0), row(0, 123123.0f));
    execute("INSERT INTO %s (k, floatval) VALUES (?, from_json(?))", 0, "\"123123.0\"");
    assertRows(execute("SELECT k, floatval FROM %s WHERE k = ?", 0), row(0, 123123.0f));
    assertInvalidMessage("Unable to make float from", "INSERT INTO %s (k, floatval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Expected a float value, but got a", "INSERT INTO %s (k, floatval) VALUES (?, from_json(?))", 0, "true");
    // ---- inet: both IPv4 and IPv6 literals, string form only ----
    execute("INSERT INTO %s (k, inetval) VALUES (?, from_json(?))", 0, "\"127.0.0.1\"");
    assertRows(execute("SELECT k, inetval FROM %s WHERE k = ?", 0), row(0, InetAddress.getByName("127.0.0.1")));
    execute("INSERT INTO %s (k, inetval) VALUES (?, from_json(?))", 0, "\"::1\"");
    assertRows(execute("SELECT k, inetval FROM %s WHERE k = ?", 0), row(0, InetAddress.getByName("::1")));
    assertInvalidMessage("Unable to make inet address from 'xyzz'", "INSERT INTO %s (k, inetval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Expected a string representation of an inet value, but got a Integer", "INSERT INTO %s (k, inetval) VALUES (?, from_json(?))", 0, "123");
    // ---- int: rejects Integer.MAX_VALUE + 1, decimals, non-numerics ----
    execute("INSERT INTO %s (k, intval) VALUES (?, from_json(?))", 0, "123123");
    assertRows(execute("SELECT k, intval FROM %s WHERE k = ?", 0), row(0, 123123));
    execute("INSERT INTO %s (k, intval) VALUES (?, from_json(?))", 0, "\"123123\"");
    assertRows(execute("SELECT k, intval FROM %s WHERE k = ?", 0), row(0, 123123));
    assertInvalidMessage("Expected an int value, but got a", "INSERT INTO %s (k, intval) VALUES (?, from_json(?))", 0, "2147483648");
    assertInvalidMessage("Expected an int value, but got a", "INSERT INTO %s (k, intval) VALUES (?, from_json(?))", 0, "123.456");
    assertInvalidMessage("Unable to make int from", "INSERT INTO %s (k, intval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Expected an int value, but got a", "INSERT INTO %s (k, intval) VALUES (?, from_json(?))", 0, "true");
    // ---- smallint: Short.MAX_VALUE boundary ----
    execute("INSERT INTO %s (k, smallintval) VALUES (?, from_json(?))", 0, "32767");
    assertRows(execute("SELECT k, smallintval FROM %s WHERE k = ?", 0), row(0, (short) 32767));
    execute("INSERT INTO %s (k, smallintval) VALUES (?, from_json(?))", 0, "\"32767\"");
    assertRows(execute("SELECT k, smallintval FROM %s WHERE k = ?", 0), row(0, (short) 32767));
    assertInvalidMessage("Unable to make short from", "INSERT INTO %s (k, smallintval) VALUES (?, from_json(?))", 0, "32768");
    assertInvalidMessage("Unable to make short from", "INSERT INTO %s (k, smallintval) VALUES (?, from_json(?))", 0, "123.456");
    assertInvalidMessage("Unable to make short from", "INSERT INTO %s (k, smallintval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Expected a short value, but got a Boolean", "INSERT INTO %s (k, smallintval) VALUES (?, from_json(?))", 0, "true");
    // ---- tinyint: Byte.MAX_VALUE boundary ----
    execute("INSERT INTO %s (k, tinyintval) VALUES (?, from_json(?))", 0, "127");
    assertRows(execute("SELECT k, tinyintval FROM %s WHERE k = ?", 0), row(0, (byte) 127));
    execute("INSERT INTO %s (k, tinyintval) VALUES (?, from_json(?))", 0, "\"127\"");
    assertRows(execute("SELECT k, tinyintval FROM %s WHERE k = ?", 0), row(0, (byte) 127));
    assertInvalidMessage("Unable to make byte from", "INSERT INTO %s (k, tinyintval) VALUES (?, from_json(?))", 0, "128");
    assertInvalidMessage("Unable to make byte from", "INSERT INTO %s (k, tinyintval) VALUES (?, from_json(?))", 0, "123.456");
    assertInvalidMessage("Unable to make byte from", "INSERT INTO %s (k, tinyintval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Expected a byte value, but got a Boolean", "INSERT INTO %s (k, tinyintval) VALUES (?, from_json(?))", 0, "true");
    // ---- text: empty string, escapes and unicode escapes round-trip; non-strings rejected ----
    execute("INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "\"\"");
    assertRows(execute("SELECT k, textval FROM %s WHERE k = ?", 0), row(0, ""));
    execute("INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "\"abcd\"");
    assertRows(execute("SELECT k, textval FROM %s WHERE k = ?", 0), row(0, "abcd"));
    execute("INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "\"some \\\" text\"");
    assertRows(execute("SELECT k, textval FROM %s WHERE k = ?", 0), row(0, "some \" text"));
    execute("INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "\"\\u2013\"");
    assertRows(execute("SELECT k, textval FROM %s WHERE k = ?", 0), row(0, "\u2013"));
    assertInvalidMessage("Expected a UTF-8 string, but got a Integer", "INSERT INTO %s (k, textval) VALUES (?, from_json(?))", 0, "123");
    // ---- time: nanosecond-precision string form only ----
    execute("INSERT INTO %s (k, timeval) VALUES (?, from_json(?))", 0, "\"07:35:07.000111222\"");
    assertRows(execute("SELECT k, timeval FROM %s WHERE k = ?", 0), row(0, TimeSerializer.timeStringToLong("07:35:07.000111222")));
    assertInvalidMessage("Expected a string representation of a time value", "INSERT INTO %s (k, timeval) VALUES (?, from_json(?))", 0, "123456");
    assertInvalidMessage("Unable to coerce 'xyz' to a formatted time", "INSERT INTO %s (k, timeval) VALUES (?, from_json(?))", 0, "\"xyz\"");
    // ---- timestamp: epoch-millis number or date string; fractional numbers rejected ----
    execute("INSERT INTO %s (k, timestampval) VALUES (?, from_json(?))", 0, "123123123123");
    assertRows(execute("SELECT k, timestampval FROM %s WHERE k = ?", 0), row(0, new Date(123123123123L)));
    execute("INSERT INTO %s (k, timestampval) VALUES (?, from_json(?))", 0, "\"2014-01-01\"");
    assertRows(execute("SELECT k, timestampval FROM %s WHERE k = ?", 0), row(0, new SimpleDateFormat("y-M-d").parse("2014-01-01")));
    assertInvalidMessage("Expected a long or a datestring representation of a timestamp value, but got a Double", "INSERT INTO %s (k, timestampval) VALUES (?, from_json(?))", 0, "123.456");
    assertInvalidMessage("Unable to parse a date/time from 'abcd'", "INSERT INTO %s (k, timestampval) VALUES (?, from_json(?))", 0, "\"abcd\"");
    // ---- timeuuid: case-insensitive parse; must be a version-1 UUID ----
    execute("INSERT INTO %s (k, timeuuidval) VALUES (?, from_json(?))", 0, "\"6bddc89a-5644-11e4-97fc-56847afe9799\"");
    assertRows(execute("SELECT k, timeuuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    execute("INSERT INTO %s (k, timeuuidval) VALUES (?, from_json(?))", 0, "\"6BDDC89A-5644-11E4-97FC-56847AFE9799\"");
    assertRows(execute("SELECT k, timeuuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    assertInvalidMessage("TimeUUID supports only version 1 UUIDs", "INSERT INTO %s (k, timeuuidval) VALUES (?, from_json(?))", 0, "\"00000000-0000-0000-0000-000000000000\"");
    assertInvalidMessage("Expected a string representation of a timeuuid, but got a Integer", "INSERT INTO %s (k, timeuuidval) VALUES (?, from_json(?))", 0, "123");
    // ---- uuid: case-insensitive parse; malformed digits rejected ----
    execute("INSERT INTO %s (k, uuidval) VALUES (?, from_json(?))", 0, "\"6bddc89a-5644-11e4-97fc-56847afe9799\"");
    assertRows(execute("SELECT k, uuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    execute("INSERT INTO %s (k, uuidval) VALUES (?, from_json(?))", 0, "\"6BDDC89A-5644-11E4-97FC-56847AFE9799\"");
    assertRows(execute("SELECT k, uuidval FROM %s WHERE k = ?", 0), row(0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    assertInvalidMessage("Unable to make UUID from", "INSERT INTO %s (k, uuidval) VALUES (?, from_json(?))", 0, "\"00000000-0000-0000-zzzz-000000000000\"");
    assertInvalidMessage("Expected a string representation of a uuid, but got a Integer", "INSERT INTO %s (k, uuidval) VALUES (?, from_json(?))", 0, "123");
    // ---- varint: arbitrary-precision integers; fractions/empty/boolean rejected ----
    execute("INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "123123123123");
    assertRows(execute("SELECT k, varintval FROM %s WHERE k = ?", 0), row(0, new BigInteger("123123123123")));
    execute("INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "\"1234567890123456789012345678901234567890\"");
    assertRows(execute("SELECT k, varintval FROM %s WHERE k = ?", 0), row(0, new BigInteger("1234567890123456789012345678901234567890")));
    assertInvalidMessage("Value '123123.123' is not a valid representation of a varint value", "INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "123123.123");
    assertInvalidMessage("Value 'xyzz' is not a valid representation of a varint value", "INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "\"xyzz\"");
    assertInvalidMessage("Value '' is not a valid representation of a varint value", "INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "\"\"");
    assertInvalidMessage("Value 'true' is not a valid representation of a varint value", "INSERT INTO %s (k, varintval) VALUES (?, from_json(?))", 0, "true");
    // ---- list: empty JSON array reads back as null; null elements forbidden ----
    execute("INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "[1, 2, 3]");
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, list(1, 2, 3)));
    execute("INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "[]");
    assertRows(execute("SELECT k, listval FROM %s WHERE k = ?", 0), row(0, null));
    assertInvalidMessage("Expected a list, but got a Integer", "INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "123");
    assertInvalidMessage("Unable to make int from", "INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "[\"abc\"]");
    assertInvalidMessage("Invalid null element in list", "INSERT INTO %s (k, listval) VALUES (?, from_json(?))", 0, "[null]");
    // ---- frozen<list> ----
    execute("INSERT INTO %s (k, frozenlistval) VALUES (?, from_json(?))", 0, "[1, 2, 3]");
    assertRows(execute("SELECT k, frozenlistval FROM %s WHERE k = ?", 0), row(0, list(1, 2, 3)));
    // ---- set: JSON array input; duplicates collapse; empty array reads back as null ----
    execute("INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    execute("INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    execute("INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[]");
    assertRows(execute("SELECT k, setval FROM %s WHERE k = ?", 0), row(0, null));
    assertInvalidMessage("Expected a list (representing a set), but got a Integer", "INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "123");
    assertInvalidMessage("Unable to make UUID from", "INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[\"abc\"]");
    assertInvalidMessage("Invalid null element in set", "INSERT INTO %s (k, setval) VALUES (?, from_json(?))", 0, "[null]");
    // ---- frozen<set>: element order in the JSON input is irrelevant ----
    execute("INSERT INTO %s (k, frozensetval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, frozensetval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    execute("INSERT INTO %s (k, frozensetval) VALUES (?, from_json(?))", 0, "[\"6bddc89a-5644-11e4-97fc-56847afe9799\", \"6bddc89a-5644-11e4-97fc-56847afe9798\"]");
    assertRows(execute("SELECT k, frozensetval FROM %s WHERE k = ?", 0), row(0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")))));
    // ---- map: JSON object input; empty object reads back as null; keys must match key type ----
    execute("INSERT INTO %s (k, mapval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": 2}");
    assertRows(execute("SELECT k, mapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "b", 2)));
    execute("INSERT INTO %s (k, mapval) VALUES (?, from_json(?))", 0, "{}");
    assertRows(execute("SELECT k, mapval FROM %s WHERE k = ?", 0), row(0, null));
    assertInvalidMessage("Expected a map, but got a Integer", "INSERT INTO %s (k, mapval) VALUES (?, from_json(?))", 0, "123");
    assertInvalidMessage("Invalid ASCII character in string literal", "INSERT INTO %s (k, mapval) VALUES (?, from_json(?))", 0, "{\"\\u1fff\\u2013\\u33B4\\u2014\": 1}");
    assertInvalidMessage("Invalid null value in map", "INSERT INTO %s (k, mapval) VALUES (?, from_json(?))", 0, "{\"a\": null}");
    // ---- frozen<map>: key order in the JSON input is irrelevant ----
    execute("INSERT INTO %s (k, frozenmapval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": 2}");
    assertRows(execute("SELECT k, frozenmapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "b", 2)));
    execute("INSERT INTO %s (k, frozenmapval) VALUES (?, from_json(?))", 0, "{\"b\": 2, \"a\": 1}");
    assertRows(execute("SELECT k, frozenmapval FROM %s WHERE k = ?", 0), row(0, map("a", 1, "b", 2)));
    // ---- tuple: fixed arity; null components allowed, extra/missing items rejected ----
    execute("INSERT INTO %s (k, tupleval) VALUES (?, from_json(?))", 0, "[1, \"foobar\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, tupleval FROM %s WHERE k = ?", 0), row(0, tuple(1, "foobar", UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))));
    execute("INSERT INTO %s (k, tupleval) VALUES (?, from_json(?))", 0, "[1, null, \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    assertRows(execute("SELECT k, tupleval FROM %s WHERE k = ?", 0), row(0, tuple(1, null, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))));
    assertInvalidMessage("Tuple contains extra items", "INSERT INTO %s (k, tupleval) VALUES (?, from_json(?))", 0, "[1, \"foobar\", \"6bddc89a-5644-11e4-97fc-56847afe9799\", 1, 2, 3]");
    assertInvalidMessage("Tuple is missing items", "INSERT INTO %s (k, tupleval) VALUES (?, from_json(?))", 0, "[1, \"foobar\"]");
    assertInvalidMessage("Unable to make int from", "INSERT INTO %s (k, tupleval) VALUES (?, from_json(?))", 0, "[\"not an int\", \"foobar\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]");
    // ---- UDT: JSON object keyed by field name ----
    execute("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), set("bar", "foo")));
    // ---- duration: short form ("53us") and ISO-8601 form ("P2W") ----
    execute("INSERT INTO %s (k, durationval) VALUES (?, from_json(?))", 0, "\"53us\"");
    assertRows(execute("SELECT k, durationval FROM %s WHERE k = ?", 0), row(0, Duration.newInstance(0, 0, 53000L)));
    execute("INSERT INTO %s (k, durationval) VALUES (?, from_json(?))", 0, "\"P2W\"");
    assertRows(execute("SELECT k, durationval FROM %s WHERE k = ?", 0), row(0, Duration.newInstance(0, 14, 0)));
    assertInvalidMessage("Unable to convert 'xyz' to a duration", "INSERT INTO %s (k, durationval) VALUES (?, from_json(?))", 0, "\"xyz\"");
    // ---- UDT continued: field order irrelevant, explicit nulls and omitted fields both read back null,
    //      unknown field names and wrong field types rejected ----
    execute("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"a\": 1, \"c\": [\"foo\", \"bar\"]}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), set("bar", "foo")));
    execute("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"a\": null, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"foo\", \"bar\"]}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, null, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), set("bar", "foo")));
    execute("INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\"}");
    assertRows(execute("SELECT k, udtval.a, udtval.b, udtval.c FROM %s WHERE k = ?", 0), row(0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), null));
    assertInvalidMessage("Unknown field", "INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"xxx\": 1}");
    assertInvalidMessage("Unable to make int from", "INSERT INTO %s (k, udtval) VALUES (?, from_json(?))", 0, "{\"a\": \"foobar\"}");
}
179481.121315cassandra
public void testGroupByWithStaticColumnsWithoutPaging() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, s int static, d int, primary key (a, b, c))");
    execute("UPDATE %s SET s = 1 WHERE a = 1");
    execute("UPDATE %s SET s = 2 WHERE a = 2");
    execute("UPDATE %s SET s = 3 WHERE a = 4");
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s GROUP BY a, b"), row(1, null, 1), row(2, null, 2), row(4, null, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b"), row(1, null, null, 1, null), row(2, null, null, 2, null), row(4, null, null, 3, null));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b LIMIT 2"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a PER PARTITION LIMIT 2"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
    assertRows(execute("SELECT DISTINCT a, s, count(s) FROM %s GROUP BY a"), row(1, 1, 1L), row(2, 2, 1L), row(4, 3, 1L));
    assertRows(execute("SELECT DISTINCT a, s, count(s) FROM %s GROUP BY a LIMIT 2"), row(1, 1, 1L), row(2, 2, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a"), row(1, null, 1, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a, b"), row(1, null, 1, 0L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a = 1 GROUP BY a, b"), row(1, null, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a, b"), row(1, null, null, 1, null));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a, b LIMIT 2"), row(1, null, 1, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 2"), row(1, null, 1, 0L, 1L));
    assertRows(execute("SELECT DISTINCT a, s, count(s) FROM %s WHERE a = 1 GROUP BY a"), row(1, 1, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b"), row(1, null, 1), row(2, null, 2), row(4, null, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b"), row(1, null, null, 1, null), row(2, null, null, 2, null), row(4, null, null, 3, null));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b LIMIT 2"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 2"), row(1, null, 1, 0L, 1L), row(2, null, 2, 0L, 1L), row(4, null, 3, 0L, 1L));
    assertRows(execute("SELECT DISTINCT a, s, count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a"), row(1, 1, 1L), row(2, 2, 1L), row(4, 3, 1L));
    assertRows(execute("SELECT DISTINCT a, s, count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"), row(1, 1, 1L), row(2, 2, 1L));
    execute("UPDATE %s SET s = 3 WHERE a = 3");
    execute("DELETE s FROM %s WHERE a = 4");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 2, 1, 3)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 2, 2, 6)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 3, 2, 12)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 4, 2, 12)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (1, 4, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (2, 2, 3, 3)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (2, 4, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (4, 8, 2, 12)");
    execute("INSERT INTO %s (a, b, c, d) VALUES (5, 8, 2, 12)");
    execute("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2");
    execute("DELETE FROM %s WHERE a = 5");
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a"), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L), row(4, 8, null, 1L, 0L), row(3, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b"), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(4, 8, null, 1L, 0L), row(3, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE b = 2 GROUP BY a, b ALLOW FILTERING"), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s GROUP BY a"), row(1, 2, 1), row(2, 2, 2), row(4, 8, null), row(3, null, 3));
    assertRows(execute("SELECT a, b, s FROM %s GROUP BY a, b"), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(4, 8, null), row(3, null, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a"), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b"), row(1, 2, 1, 1, 3), row(1, 4, 2, 1, 12), row(2, 2, 3, 2, 3), row(2, 4, 3, 2, 6), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a LIMIT 2"), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(4, 8, null, 1L, 0L), row(3, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 3"), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(4, 8, null, 1L, 0L));
    assertRows(execute("SELECT a, b, s FROM %s GROUP BY a LIMIT 2"), row(1, 2, 1), row(2, 2, 2));
    assertRows(execute("SELECT a, b, s FROM %s GROUP BY a, b LIMIT 10"), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(4, 8, null), row(3, null, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a LIMIT 2"), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b LIMIT 10"), row(1, 2, 1, 1, 3), row(1, 4, 2, 1, 12), row(2, 2, 3, 2, 3), row(2, 4, 3, 2, 6), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
    assertRows(execute("SELECT a, b, s FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1), row(2, 2, 2), row(4, 8, null), row(3, null, 3));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3), row(4, 8, 2, null, 12), row(3, null, null, 3, null));
    assertRows(execute("SELECT a, b, s FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"), row(1, 2, 1), row(2, 2, 2));
    assertRows(execute("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2"), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3));
    assertRows(execute("SELECT DISTINCT a, s, count(a), count(s) FROM %s GROUP BY a"), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L), row(4, null, 1L, 0L), row(3, 3, 1L, 1L));
    assertRows(execute("SELECT DISTINCT a, s, count(a), count(s) FROM %s GROUP BY a LIMIT 2"), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L));
    assertInvalidMessage("ORDER BY is only supported when the partition key is restricted by an EQ or an IN", "SELECT a, b, s, count(b), count(s) FROM %s GROUP BY a ORDER BY b DESC, c DESC");
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 1 GROUP BY a"), row(1, 2, 1, 4L, 4L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 3 GROUP BY a, b"), row(3, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 AND b = 2 GROUP BY a, b"), row(2, 2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a = 1 GROUP BY a"), row(1, 2, 1));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a = 4 GROUP BY a, b"), row(4, 8, null));
    assertRows(execute("SELECT * FROM %s WHERE a = 1 GROUP BY a"), row(1, 2, 1, 1, 3));
    assertRows(execute("SELECT * FROM %s WHERE a = 4 GROUP BY a, b"), row(4, 8, 2, null, 12));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b LIMIT 1"), row(2, 2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b PER PARTITION LIMIT 1"), row(2, 2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a = 2 GROUP BY a, b LIMIT 1"), row(2, 2, 2));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a = 2 GROUP BY a, b LIMIT 2"), row(2, 2, 2), row(2, 4, 2));
    assertRows(execute("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a = 2 GROUP BY a"), row(2, 2, 1L, 1L));
    assertRows(execute("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a = 4 GROUP BY a"), row(4, null, 1L, 0L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC"), row(2, 4, 2, 1L, 1L), row(2, 2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 1"), row(2, 4, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a = 2 GROUP BY a, b ORDER BY b DESC, c DESC PER PARTITION LIMIT 1"), row(2, 4, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a"), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b"), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) AND b = 2 GROUP BY a, b"), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a"), row(1, 2, 1), row(2, 2, 2), row(3, null, 3), row(4, 8, null));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b"), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(3, null, 3), row(4, 8, null));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a"), row(1, 2, 1, 1, 3), row(2, 2, 3, 2, 3), row(3, null, null, 3, null), row(4, 8, 2, null, 12));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b"), row(1, 2, 1, 1, 3), row(1, 4, 2, 1, 12), row(2, 2, 3, 2, 3), row(2, 4, 3, 2, 6), row(3, null, null, 3, null), row(4, 8, 2, null, 12));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"), row(1, 2, 1, 4L, 4L), row(2, 2, 2, 2L, 2L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 1"), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 2"), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(2, 4, 2, 1L, 1L), row(3, null, 3, 0L, 1L), row(4, 8, null, 1L, 0L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 3"), row(1, 2, 1, 2L, 2L), row(2, 2, 2, 1L, 1L), row(3, null, 3, 0L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b PER PARTITION LIMIT 4 LIMIT 3"), row(1, 2, 1, 2L, 2L), row(1, 4, 1, 2L, 2L), row(2, 2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"), row(1, 2, 1), row(2, 2, 2));
    assertRows(execute("SELECT a, b, s FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a, b LIMIT 10"), row(1, 2, 1), row(1, 4, 1), row(2, 2, 2), row(2, 4, 2), row(3, null, 3), row(4, 8, null));
    assertRows(execute("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a"), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L), row(3, 3, 1L, 1L), row(4, null, 1L, 0L));
    assertRows(execute("SELECT DISTINCT a, s, count(a), count(s) FROM %s WHERE a IN (1, 2, 3, 4) GROUP BY a LIMIT 2"), row(1, 1, 1L, 1L), row(2, 2, 1L, 1L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC"), row(4, 8, null, 1L, 0L), row(1, 4, 1, 2L, 2L), row(2, 4, 2, 1L, 1L), row(2, 2, 2, 1L, 1L), row(1, 2, 1, 2L, 2L));
    assertRows(execute("SELECT a, b, s, count(b), count(s) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b ORDER BY b DESC, c DESC LIMIT 2"), row(4, 8, null, 1L, 0L), row(1, 4, 1, 2L, 2L));
}
178726.112304cassandra
/**
 * Verifies that GROUP BY queries return identical results regardless of the driver-side
 * page size (exercised for page sizes 1 through 9), including interactions with
 * aggregates, LIMIT, PER PARTITION LIMIT, DISTINCT, ORDER BY, ALLOW FILTERING,
 * single-partition (a = 1) and multi-partition (a IN ...) restrictions.
 */
public void testGroupByWithPaging() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, primary key (a, b, c, d))");
    // Populate partitions a = 1..4 with multiple clustering rows per (a, b) group.
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 1, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, 2, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 2, 3, 3, 6)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 4, 3, 6, 12)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (3, 3, 2, 12, 24)");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (4, 8, 2, 12, 24)");
    // Delete one row and one entire partition (a = 3) so paging and aggregation
    // must correctly skip tombstoned data.
    execute("DELETE FROM %s WHERE a = 1 AND b = 3 AND c = 2 AND d = 12");
    execute("DELETE FROM %s WHERE a = 3");
    // Every assertion below must hold for each page size; a paging bug would show up
    // as results differing between iterations.
    for (int pageSize = 1; pageSize < 10; pageSize++) {
        // Range queries (no partition-key restriction): grouping at partition and
        // clustering levels, with and without GROUP BY (whole-range aggregation).
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a", pageSize), row(1, 2, 6, 4L, 24), row(2, 2, 6, 2L, 12), row(4, 8, 24, 1L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b", pageSize), row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s", pageSize), row(1, 2, 6, 7L, 24));
        // Secondary-filtering on a non-key column requires ALLOW FILTERING.
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE b = 2 GROUP BY a, b ALLOW FILTERING", pageSize), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE b = 2 ALLOW FILTERING", pageSize), row(1, 2, 6, 3L, 12));
        // Non-aggregated selections: GROUP BY returns the first row of each group.
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s GROUP BY a, b, c", pageSize), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s GROUP BY a, b", pageSize), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b, c", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b", pageSize), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
        // LIMIT counts groups (not rows); PER PARTITION LIMIT counts groups per partition.
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b LIMIT 2", pageSize), row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s LIMIT 2", pageSize), row(1, 2, 6, 7L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 3", pageSize), row(1, 2, 6, 2L, 12), row(1, 4, 12, 2L, 24), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1", pageSize), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s GROUP BY a, b PER PARTITION LIMIT 1 LIMIT 2", pageSize), row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2", pageSize), row(1, 2, 1, 3), row(1, 2, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s GROUP BY a, b, c LIMIT 3", pageSize), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s GROUP BY a, b LIMIT 3", pageSize), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3", pageSize), row(1, 2, 1, 3), row(1, 2, 2, 6), row(2, 2, 3, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b, c LIMIT 3", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b LIMIT 3", pageSize), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b PER PARTITION LIMIT 1", pageSize), row(1, 2, 1, 3, 6), row(2, 2, 3, 3, 6), row(4, 8, 2, 12, 24));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s GROUP BY a, b, c PER PARTITION LIMIT 2 LIMIT 3", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(2, 2, 3, 3, 6));
        // DISTINCT combined with GROUP BY on the partition key.
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s GROUP BY a", pageSize), row(1, 1L), row(2, 1L), row(4, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s", pageSize), row(1, 3L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s GROUP BY a LIMIT 2", pageSize), row(1, 1L), row(2, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s LIMIT 2", pageSize), row(1, 3L));
        // ORDER BY is rejected on unrestricted range queries.
        assertInvalidMessage("ORDER BY is only supported when the partition key is restricted by an EQ or an IN", "SELECT a, b, c, count(b), max(e) FROM %s GROUP BY a, b ORDER BY b DESC, c DESC");
        // Single-partition queries (a = 1).
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1", pageSize), row(1, 2, 6, 4L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2 GROUP BY a, b, c", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 AND b = 2", pageSize), row(1, 2, 6, 2L, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b", pageSize), row(1, 2, 1, 3), row(1, 4, 2, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c", pageSize), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a, b", pageSize), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a", pageSize), row(1, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s WHERE a = 1 GROUP BY a", pageSize), row(1, 1L));
        // Single-partition queries with LIMIT / PER PARTITION LIMIT.
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 10", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 LIMIT 2", pageSize), row(1, 2, 6, 4L, 24));
        assertRowsNet(executeNetWithPaging("SELECT count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 1", pageSize), row(1L, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 2", pageSize), row(1, 2, 1, 3), row(1, 4, 2, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1", pageSize), row(1, 2, 1, 3));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2", pageSize), row(1, 2, 1, 3), row(1, 2, 2, 6));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c LIMIT 2", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a, b LIMIT 1", pageSize), row(1, 2, 1, 3, 6));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a, b, c PER PARTITION LIMIT 2", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 1 GROUP BY a, b PER PARTITION LIMIT 1", pageSize), row(1, 2, 1, 3, 6));
        // Single-partition queries with reversed clustering order; the first row of each
        // group is then the one with the highest (b, c).
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC", pageSize), row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12), row(1, 2, 6, 1L, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 ORDER BY b DESC, c DESC", pageSize), row(1, 4, 24, 4L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC LIMIT 2", pageSize), row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 ORDER BY b DESC, c DESC LIMIT 2", pageSize), row(1, 4, 24, 4L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a = 1 GROUP BY a, b, c ORDER BY b DESC, c DESC PER PARTITION LIMIT 2", pageSize), row(1, 4, 24, 2L, 24), row(1, 2, 12, 1L, 12));
        // Multi-partition queries (a IN ...).
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4)", pageSize), row(1, 2, 6, 7L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) AND b = 2 GROUP BY a, b, c", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(2, 2, 6, 1L, 6));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) AND b = 2", pageSize), row(1, 2, 6, 3L, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 2", pageSize), row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(2, 2, 6, 1L, 6), row(2, 4, 12, 1L, 12), row(4, 8, 24, 1L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, e, count(b), max(e) FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c PER PARTITION LIMIT 1", pageSize), row(1, 2, 6, 1L, 6), row(2, 2, 6, 1L, 6), row(4, 8, 24, 1L, 24));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b", pageSize), row(1, 2, 1, 3), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
        assertRowsNet(executeNetWithPaging("SELECT a, b, c, d FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c", pageSize), row(1, 2, 1, 3), row(1, 2, 2, 6), row(1, 4, 2, 6), row(2, 2, 3, 3), row(2, 4, 3, 6), row(4, 8, 2, 12));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b, c", pageSize), row(1, 2, 1, 3, 6), row(1, 2, 2, 6, 12), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (1, 2, 4) GROUP BY a, b", pageSize), row(1, 2, 1, 3, 6), row(1, 4, 2, 6, 12), row(2, 2, 3, 3, 6), row(2, 4, 3, 6, 12), row(4, 8, 2, 12, 24));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a", pageSize), row(1, 1L), row(2, 1L), row(4, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4)", pageSize), row(1, 3L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) GROUP BY a LIMIT 2", pageSize), row(1, 1L), row(2, 1L));
        assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, count(a)FROM %s WHERE a IN (1, 2, 4) LIMIT 2", pageSize), row(1, 3L));
    }
}
175629.641362elasticsearch
/**
 * Checks that the GeoJSON {@code orientation} field is honored when parsing a polygon
 * that crosses the dateline.
 *
 * The outer shell is the same in every case; only the orientation keyword and the hole
 * ring vary. The counter-clockwise synonyms ({@code right}, {@code ccw},
 * {@code counterclockwise}) must yield a single polygon, while the clockwise synonyms
 * ({@code left}, {@code cw}, {@code clockwise}) force the shape to be split across the
 * dateline into a multi-polygon.
 */
public void testParseOrientationOption() throws IOException, ParseException {
    // Hole ring used with the CCW orientation keywords.
    double[][] ccwHole = { { -172.0, 8.0 }, { 174.0, 10.0 }, { -172.0, -8.0 }, { -172.0, 8.0 } };
    for (String orientation : new String[] { "right", "ccw", "counterclockwise" }) {
        assertOrientedPolygon(orientedPolygonGeoJson(orientation, ccwHole), false);
    }
    // Hole ring used with the CW orientation keywords; these cross the dateline and
    // therefore parse as a multi-polygon.
    double[][] cwHole = { { -178.0, 8.0 }, { 178.0, 8.0 }, { 180.0, -8.0 }, { -178.0, 8.0 } };
    for (String orientation : new String[] { "left", "cw", "clockwise" }) {
        assertOrientedPolygon(orientedPolygonGeoJson(orientation, cwHole), true);
    }
}

/**
 * Builds the GeoJSON for a polygon with the shared outer shell, the given
 * {@code orientation} keyword, and the given hole ring ({@code [lon, lat]} pairs).
 */
private XContentBuilder orientedPolygonGeoJson(String orientation, double[][] hole) throws IOException {
    // Outer shell shared by all test cases.
    double[][] shell = { { 176.0, 15.0 }, { -177.0, 10.0 }, { -177.0, -10.0 },
                         { 176.0, -15.0 }, { 172.0, 0.0 }, { 176.0, 15.0 } };
    XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
        .field("type", "Polygon")
        .field("orientation", orientation)
        .startArray("coordinates");
    builder.startArray();
    for (double[] point : shell) {
        builder.startArray().value(point[0]).value(point[1]).endArray();
    }
    builder.endArray();
    builder.startArray();
    for (double[] point : hole) {
        builder.startArray().value(point[0]).value(point[1]).endArray();
    }
    builder.endArray();
    return builder.endArray().endObject();
}

/**
 * Parses {@code polygonGeoJson} through both parsing paths (the S4J shape builder and
 * the direct {@code parse} call) and asserts the expected shape kind for each.
 */
private void assertOrientedPolygon(XContentBuilder polygonGeoJson, boolean expectMultiPolygon) throws IOException, ParseException {
    try (XContentParser parser = createParser(polygonGeoJson)) {
        parser.nextToken();
        Shape shape = ShapeParser.parse(parser).buildS4J();
        if (expectMultiPolygon) {
            ElasticsearchGeoAssertions.assertMultiPolygon(shape, true);
        } else {
            ElasticsearchGeoAssertions.assertPolygon(shape, true);
        }
    }
    try (XContentParser parser = createParser(polygonGeoJson)) {
        parser.nextToken();
        if (expectMultiPolygon) {
            ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false);
        } else {
            ElasticsearchGeoAssertions.assertPolygon(parse(parser), false);
        }
    }
}
1710836.661298gwt
protected void loadNameMap() {
    super.loadNameMap();
    namesMap.put("001", "World");
    namesMap.put("002", "Africa");
    namesMap.put("003", "North America");
    namesMap.put("005", "South America");
    namesMap.put("009", "Oceania");
    namesMap.put("011", "Western Africa");
    namesMap.put("013", "Central America");
    namesMap.put("014", "Eastern Africa");
    namesMap.put("015", "Northern Africa");
    namesMap.put("017", "Middle Africa");
    namesMap.put("018", "Southern Africa");
    namesMap.put("019", "Americas");
    namesMap.put("021", "Northern America");
    namesMap.put("029", "Caribbean");
    namesMap.put("030", "Eastern Asia");
    namesMap.put("034", "Southern Asia");
    namesMap.put("035", "Southeast Asia");
    namesMap.put("039", "Southern Europe");
    namesMap.put("053", "Australasia");
    namesMap.put("054", "Melanesia");
    namesMap.put("057", "Micronesian Region");
    namesMap.put("061", "Polynesia");
    namesMap.put("142", "Asia");
    namesMap.put("143", "Central Asia");
    namesMap.put("145", "Western Asia");
    namesMap.put("150", "Europe");
    namesMap.put("151", "Eastern Europe");
    namesMap.put("154", "Northern Europe");
    namesMap.put("155", "Western Europe");
    namesMap.put("202", "Sub-Saharan Africa");
    namesMap.put("419", "Latin America");
    namesMap.put("AC", "Ascension Island");
    namesMap.put("AD", "Andorra");
    namesMap.put("AE", "United Arab Emirates");
    namesMap.put("AF", "Afghanistan");
    namesMap.put("AG", "Antigua & Barbuda");
    namesMap.put("AI", "Anguilla");
    namesMap.put("AL", "Albania");
    namesMap.put("AM", "Armenia");
    namesMap.put("AO", "Angola");
    namesMap.put("AQ", "Antarctica");
    namesMap.put("AR", "Argentina");
    namesMap.put("AS", "American Samoa");
    namesMap.put("AT", "Austria");
    namesMap.put("AU", "Australia");
    namesMap.put("AW", "Aruba");
    namesMap.put("AX", "Ã…land Islands");
    namesMap.put("AZ", "Azerbaijan");
    namesMap.put("BA", "Bosnia & Herzegovina");
    namesMap.put("BB", "Barbados");
    namesMap.put("BD", "Bangladesh");
    namesMap.put("BE", "Belgium");
    namesMap.put("BF", "Burkina Faso");
    namesMap.put("BG", "Bulgaria");
    namesMap.put("BH", "Bahrain");
    namesMap.put("BI", "Burundi");
    namesMap.put("BJ", "Benin");
    namesMap.put("BL", "St. Barthélemy");
    namesMap.put("BM", "Bermuda");
    namesMap.put("BN", "Brunei");
    namesMap.put("BO", "Bolivia");
    namesMap.put("BQ", "Caribbean Netherlands");
    namesMap.put("BR", "Brazil");
    namesMap.put("BS", "Bahamas");
    namesMap.put("BT", "Bhutan");
    namesMap.put("BV", "Bouvet Island");
    namesMap.put("BW", "Botswana");
    namesMap.put("BY", "Belarus");
    namesMap.put("BZ", "Belize");
    namesMap.put("CA", "Canada");
    namesMap.put("CC", "Cocos (Keeling) Islands");
    namesMap.put("CD", "Congo - Kinshasa");
    namesMap.put("CF", "Central African Republic");
    namesMap.put("CG", "Congo - Brazzaville");
    namesMap.put("CH", "Switzerland");
    namesMap.put("CI", "Côte d’Ivoire");
    namesMap.put("CK", "Cook Islands");
    namesMap.put("CL", "Chile");
    namesMap.put("CM", "Cameroon");
    namesMap.put("CN", "China");
    namesMap.put("CO", "Colombia");
    namesMap.put("CP", "Clipperton Island");
    namesMap.put("CR", "Costa Rica");
    namesMap.put("CU", "Cuba");
    namesMap.put("CV", "Cape Verde");
    namesMap.put("CW", "Curaçao");
    namesMap.put("CX", "Christmas Island");
    namesMap.put("CY", "Cyprus");
    namesMap.put("CZ", "Czechia");
    namesMap.put("DE", "Germany");
    namesMap.put("DG", "Diego Garcia");
    namesMap.put("DJ", "Djibouti");
    namesMap.put("DK", "Denmark");
    namesMap.put("DM", "Dominica");
    namesMap.put("DO", "Dominican Republic");
    namesMap.put("DZ", "Algeria");
    namesMap.put("EA", "Ceuta & Melilla");
    namesMap.put("EC", "Ecuador");
    namesMap.put("EE", "Estonia");
    namesMap.put("EG", "Egypt");
    namesMap.put("EH", "Western Sahara");
    namesMap.put("ER", "Eritrea");
    namesMap.put("ES", "Spain");
    namesMap.put("ET", "Ethiopia");
    namesMap.put("EU", "European Union");
    namesMap.put("EZ", "Eurozone");
    namesMap.put("FI", "Finland");
    namesMap.put("FJ", "Fiji");
    namesMap.put("FK", "Falkland Islands");
    namesMap.put("FM", "Micronesia");
    namesMap.put("FO", "Faroe Islands");
    namesMap.put("FR", "France");
    namesMap.put("GA", "Gabon");
    namesMap.put("GB", "United Kingdom");
    namesMap.put("GD", "Grenada");
    namesMap.put("GE", "Georgia");
    namesMap.put("GF", "French Guiana");
    namesMap.put("GG", "Guernsey");
    namesMap.put("GH", "Ghana");
    namesMap.put("GI", "Gibraltar");
    namesMap.put("GL", "Greenland");
    namesMap.put("GM", "Gambia");
    namesMap.put("GN", "Guinea");
    namesMap.put("GP", "Guadeloupe");
    namesMap.put("GQ", "Equatorial Guinea");
    namesMap.put("GR", "Greece");
    namesMap.put("GS", "South Georgia & South Sandwich Islands");
    namesMap.put("GT", "Guatemala");
    namesMap.put("GU", "Guam");
    namesMap.put("GW", "Guinea-Bissau");
    namesMap.put("GY", "Guyana");
    namesMap.put("HK", "Hong Kong SAR China");
    namesMap.put("HM", "Heard & McDonald Islands");
    namesMap.put("HN", "Honduras");
    namesMap.put("HR", "Croatia");
    namesMap.put("HT", "Haiti");
    namesMap.put("HU", "Hungary");
    namesMap.put("IC", "Canary Islands");
    namesMap.put("ID", "Indonesia");
    namesMap.put("IE", "Ireland");
    namesMap.put("IL", "Israel");
    namesMap.put("IM", "Isle of Man");
    namesMap.put("IN", "India");
    namesMap.put("IO", "British Indian Ocean Territory");
    namesMap.put("IQ", "Iraq");
    namesMap.put("IR", "Iran");
    namesMap.put("IS", "Iceland");
    namesMap.put("IT", "Italy");
    namesMap.put("JE", "Jersey");
    namesMap.put("JM", "Jamaica");
    namesMap.put("JO", "Jordan");
    namesMap.put("JP", "Japan");
    namesMap.put("KE", "Kenya");
    namesMap.put("KG", "Kyrgyzstan");
    namesMap.put("KH", "Cambodia");
    namesMap.put("KI", "Kiribati");
    namesMap.put("KM", "Comoros");
    namesMap.put("KN", "St. Kitts & Nevis");
    namesMap.put("KP", "North Korea");
    namesMap.put("KR", "South Korea");
    namesMap.put("KW", "Kuwait");
    namesMap.put("KY", "Cayman Islands");
    namesMap.put("KZ", "Kazakhstan");
    namesMap.put("LA", "Laos");
    namesMap.put("LB", "Lebanon");
    namesMap.put("LC", "St. Lucia");
    namesMap.put("LI", "Liechtenstein");
    namesMap.put("LK", "Sri Lanka");
    namesMap.put("LR", "Liberia");
    namesMap.put("LS", "Lesotho");
    namesMap.put("LT", "Lithuania");
    namesMap.put("LU", "Luxembourg");
    namesMap.put("LV", "Latvia");
    namesMap.put("LY", "Libya");
    namesMap.put("MA", "Morocco");
    namesMap.put("MC", "Monaco");
    namesMap.put("MD", "Moldova");
    namesMap.put("ME", "Montenegro");
    namesMap.put("MF", "St. Martin");
    namesMap.put("MG", "Madagascar");
    namesMap.put("MH", "Marshall Islands");
    namesMap.put("MK", "Macedonia");
    namesMap.put("ML", "Mali");
    namesMap.put("MM", "Myanmar (Burma)");
    namesMap.put("MN", "Mongolia");
    namesMap.put("MO", "Macau SAR China");
    namesMap.put("MP", "Northern Mariana Islands");
    namesMap.put("MQ", "Martinique");
    namesMap.put("MR", "Mauritania");
    namesMap.put("MS", "Montserrat");
    namesMap.put("MT", "Malta");
    namesMap.put("MU", "Mauritius");
    namesMap.put("MV", "Maldives");
    namesMap.put("MW", "Malawi");
    namesMap.put("MX", "Mexico");
    namesMap.put("MY", "Malaysia");
    namesMap.put("MZ", "Mozambique");
    namesMap.put("NA", "Namibia");
    namesMap.put("NC", "New Caledonia");
    namesMap.put("NE", "Niger");
    namesMap.put("NF", "Norfolk Island");
    namesMap.put("NG", "Nigeria");
    namesMap.put("NI", "Nicaragua");
    namesMap.put("NL", "Netherlands");
    namesMap.put("NO", "Norway");
    namesMap.put("NP", "Nepal");
    namesMap.put("NR", "Nauru");
    namesMap.put("NU", "Niue");
    namesMap.put("NZ", "New Zealand");
    namesMap.put("OM", "Oman");
    namesMap.put("PA", "Panama");
    namesMap.put("PE", "Peru");
    namesMap.put("PF", "French Polynesia");
    namesMap.put("PG", "Papua New Guinea");
    namesMap.put("PH", "Philippines");
    namesMap.put("PK", "Pakistan");
    namesMap.put("PL", "Poland");
    namesMap.put("PM", "St. Pierre & Miquelon");
    namesMap.put("PN", "Pitcairn Islands");
    namesMap.put("PR", "Puerto Rico");
    namesMap.put("PS", "Palestinian Territories");
    namesMap.put("PT", "Portugal");
    namesMap.put("PW", "Palau");
    namesMap.put("PY", "Paraguay");
    namesMap.put("QA", "Qatar");
    namesMap.put("QO", "Outlying Oceania");
    namesMap.put("RE", "Réunion");
    namesMap.put("RO", "Romania");
    namesMap.put("RS", "Serbia");
    namesMap.put("RU", "Russia");
    namesMap.put("RW", "Rwanda");
    namesMap.put("SA", "Saudi Arabia");
    namesMap.put("SB", "Solomon Islands");
    namesMap.put("SC", "Seychelles");
    namesMap.put("SD", "Sudan");
    namesMap.put("SE", "Sweden");
    namesMap.put("SG", "Singapore");
    namesMap.put("SH", "St. Helena");
    namesMap.put("SI", "Slovenia");
    namesMap.put("SJ", "Svalbard & Jan Mayen");
    namesMap.put("SK", "Slovakia");
    namesMap.put("SL", "Sierra Leone");
    namesMap.put("SM", "San Marino");
    namesMap.put("SN", "Senegal");
    namesMap.put("SO", "Somalia");
    namesMap.put("SR", "Suriname");
    namesMap.put("SS", "South Sudan");
    namesMap.put("ST", "São Tomé & Príncipe");
    namesMap.put("SV", "El Salvador");
    namesMap.put("SX", "Sint Maarten");
    namesMap.put("SY", "Syria");
    namesMap.put("SZ", "Swaziland");
    namesMap.put("TA", "Tristan da Cunha");
    namesMap.put("TC", "Turks & Caicos Islands");
    namesMap.put("TD", "Chad");
    namesMap.put("TF", "French Southern Territories");
    namesMap.put("TG", "Togo");
    namesMap.put("TH", "Thailand");
    namesMap.put("TJ", "Tajikistan");
    namesMap.put("TK", "Tokelau");
    namesMap.put("TL", "Timor-Leste");
    namesMap.put("TM", "Turkmenistan");
    namesMap.put("TN", "Tunisia");
    namesMap.put("TO", "Tonga");
    namesMap.put("TR", "Turkey");
    namesMap.put("TT", "Trinidad & Tobago");
    namesMap.put("TV", "Tuvalu");
    namesMap.put("TW", "Taiwan");
    namesMap.put("TZ", "Tanzania");
    namesMap.put("UA", "Ukraine");
    namesMap.put("UG", "Uganda");
    namesMap.put("UM", "U.S. Outlying Islands");
    namesMap.put("UN", "United Nations");
    namesMap.put("US", "United States");
    namesMap.put("UY", "Uruguay");
    namesMap.put("UZ", "Uzbekistan");
    namesMap.put("VA", "Vatican City");
    namesMap.put("VC", "St. Vincent & Grenadines");
    namesMap.put("VE", "Venezuela");
    namesMap.put("VG", "British Virgin Islands");
    namesMap.put("VI", "U.S. Virgin Islands");
    namesMap.put("VN", "Vietnam");
    namesMap.put("VU", "Vanuatu");
    namesMap.put("WF", "Wallis & Futuna");
    namesMap.put("WS", "Samoa");
    namesMap.put("XA", "Pseudo-Accents");
    namesMap.put("XB", "Pseudo-Bidi");
    namesMap.put("XK", "Kosovo");
    namesMap.put("YE", "Yemen");
    namesMap.put("YT", "Mayotte");
    namesMap.put("ZA", "South Africa");
    namesMap.put("ZM", "Zambia");
    namesMap.put("ZW", "Zimbabwe");
    namesMap.put("ZZ", "Unknown Region");
}
1713264.371276hadoop
/**
 * End-to-end check of the HDFS inotify event stream. Performs a fixed
 * sequence of namesystem operations against a mini HA cluster and then
 * asserts that the {@code DFSInotifyEventInputStream} delivers the matching
 * event batches in the same order, with expected field values and
 * {@code toString()} prefixes. Transaction ids are checked to advance
 * monotonically via {@code checkTxid()}.
 *
 * @throws IOException on cluster/file-system failures
 * @throws URISyntaxException declared by helpers used in this test class
 * @throws InterruptedException if waiting for events is interrupted
 * @throws MissingEventsException if an expected event batch never arrives
 */
public void testBasic() throws IOException, URISyntaxException, InterruptedException, MissingEventsException {
    // Small block size, ACLs enabled, and 1ms access-time precision so the
    // read further below is recorded as a TIMES metadata update.
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
    MiniQJMHACluster.Builder builder = new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(2);
    MiniQJMHACluster cluster = builder.build();
    try {
        cluster.getDfsCluster().waitActive();
        cluster.getDfsCluster().transitionToActive(0);
        DFSClient client = new DFSClient(cluster.getDfsCluster().getNameNode(0).getNameNodeAddress(), conf);
        FileSystem fs = cluster.getDfsCluster().getFileSystem(0);
        // Seed files used by the operations below; /truncate_file is two
        // blocks so it can be truncated down to one.
        DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/truncate_file"), BLOCK_SIZE * 2, (short) 1, 0L);
        // Open the event stream BEFORE performing the operations so every
        // edit below is observed. Each operation here corresponds, in order,
        // to one batch assertion block further down.
        DFSInotifyEventInputStream eis = client.getInotifyEventStream();
        client.rename("/file", "/file4", null);
        client.rename("/file4", "/file2");
        OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
        os.write(new byte[BLOCK_SIZE]);
        os.close();
        os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND), null, null);
        os.write(new byte[BLOCK_SIZE]);
        os.close();
        // Sleep so the access-time update from the read below lands past the
        // 1ms precision window configured above.
        Thread.sleep(10);
        client.open("/file2").read(new byte[1]);
        client.setReplication("/file2", (short) 1);
        client.concat("/file2", new String[] { "/file3" });
        client.delete("/file2", false);
        client.mkdirs("/dir", null, false);
        client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
        client.setOwner("/dir", "username", "groupname");
        client.createSymlink("/dir", "/dir2", false);
        client.setXAttr("/file5", "user.field", "value".getBytes(), EnumSet.of(XAttrSetFlag.CREATE));
        client.removeXAttr("/file5", "user.field");
        client.setAcl("/file5", AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));
        client.removeAcl("/file5");
        client.rename("/file5", "/dir");
        client.truncate("/truncate_file", BLOCK_SIZE);
        client.create("/file_ec_test1", false);
        EventBatch batch = null;
        // Batch 1: RENAME /file -> /file4.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        long txid = batch.getTxid();
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
        Event.RenameEvent re = (Event.RenameEvent) batch.getEvents()[0];
        Assert.assertEquals("/file4", re.getDstPath());
        Assert.assertEquals("/file", re.getSrcPath());
        Assert.assertTrue(re.getTimestamp() > 0);
        LOG.info(re.toString());
        Assert.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
        // Snapshot the backlog estimate now; verified unchanged once the
        // stream has been fully drained at the end of the test.
        long eventsBehind = eis.getTxidsBehindEstimate();
        // Batch 2: RENAME /file4 -> /file2 (deprecated two-arg rename).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
        Event.RenameEvent re2 = (Event.RenameEvent) batch.getEvents()[0];
        Assert.assertTrue(re2.getDstPath().equals("/file2"));
        Assert.assertTrue(re2.getSrcPath().equals("/file4"));
        Assert.assertTrue(re2.getTimestamp() > 0);
        LOG.info(re2.toString());
        // Batch 3: CREATE of /file2 with overwrite=true.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
        Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
        Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
        Assert.assertTrue(ce.getPath().equals("/file2"));
        Assert.assertTrue(ce.getCtime() > 0);
        Assert.assertTrue(ce.getReplication() > 0);
        Assert.assertTrue(ce.getSymlinkTarget() == null);
        Assert.assertTrue(ce.getOverwrite());
        Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
        Assert.assertTrue(ce.isErasureCoded().isPresent());
        Assert.assertFalse(ce.isErasureCoded().get());
        LOG.info(ce.toString());
        Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
        // Batch 4: CLOSE of /file2 after the first write.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
        Event.CloseEvent ce2 = (Event.CloseEvent) batch.getEvents()[0];
        Assert.assertTrue(ce2.getPath().equals("/file2"));
        Assert.assertTrue(ce2.getFileSize() > 0);
        Assert.assertTrue(ce2.getTimestamp() > 0);
        LOG.info(ce2.toString());
        Assert.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
        // Batch 5: APPEND to /file2 (existing last block, so not a new block).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
        Event.AppendEvent append2 = (Event.AppendEvent) batch.getEvents()[0];
        Assert.assertEquals("/file2", append2.getPath());
        Assert.assertFalse(append2.toNewBlock());
        LOG.info(append2.toString());
        Assert.assertTrue(append2.toString().startsWith("AppendEvent [path="));
        // Batch 6: CLOSE of /file2 after the append.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
        // Batch 7: METADATA/TIMES from the one-byte read (access time update).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue.getPath().equals("/file2"));
        Assert.assertTrue(mue.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.TIMES);
        LOG.info(mue.toString());
        Assert.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
        // Batch 8: METADATA/REPLICATION from setReplication to 1.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue2.getPath().equals("/file2"));
        Assert.assertTrue(mue2.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.REPLICATION);
        Assert.assertTrue(mue2.getReplication() == 1);
        LOG.info(mue2.toString());
        // Batch 9: concat produces three events in ONE batch:
        // APPEND(/file2), UNLINK(/file3), CLOSE(/file2).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(3, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
        Assert.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
        Assert.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
        Event.UnlinkEvent ue2 = (Event.UnlinkEvent) batch.getEvents()[1];
        Assert.assertTrue(ue2.getPath().equals("/file3"));
        Assert.assertTrue(ue2.getTimestamp() > 0);
        LOG.info(ue2.toString());
        Assert.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
        Assert.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
        Event.CloseEvent ce3 = (Event.CloseEvent) batch.getEvents()[2];
        Assert.assertTrue(ce3.getPath().equals("/file2"));
        Assert.assertTrue(ce3.getTimestamp() > 0);
        // Batch 10: UNLINK of /file2 from the delete.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
        Event.UnlinkEvent ue = (Event.UnlinkEvent) batch.getEvents()[0];
        Assert.assertTrue(ue.getPath().equals("/file2"));
        Assert.assertTrue(ue.getTimestamp() > 0);
        LOG.info(ue.toString());
        // Batch 11: CREATE of directory /dir (directories have replication 0).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
        Event.CreateEvent ce4 = (Event.CreateEvent) batch.getEvents()[0];
        Assert.assertTrue(ce4.getiNodeType() == Event.CreateEvent.INodeType.DIRECTORY);
        Assert.assertTrue(ce4.getPath().equals("/dir"));
        Assert.assertTrue(ce4.getCtime() > 0);
        Assert.assertTrue(ce4.getReplication() == 0);
        Assert.assertTrue(ce4.getSymlinkTarget() == null);
        LOG.info(ce4.toString());
        // Batch 12: METADATA/PERMS from setPermission on /dir.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue3.getPath().equals("/dir"));
        Assert.assertTrue(mue3.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.PERMS);
        Assert.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
        LOG.info(mue3.toString());
        // Batch 13: METADATA/OWNER from setOwner on /dir.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue4.getPath().equals("/dir"));
        Assert.assertTrue(mue4.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.OWNER);
        Assert.assertTrue(mue4.getOwnerName().equals("username"));
        Assert.assertTrue(mue4.getGroupName().equals("groupname"));
        LOG.info(mue4.toString());
        // Batch 14: CREATE of symlink /dir2 -> /dir.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
        Event.CreateEvent ce5 = (Event.CreateEvent) batch.getEvents()[0];
        Assert.assertTrue(ce5.getiNodeType() == Event.CreateEvent.INodeType.SYMLINK);
        Assert.assertTrue(ce5.getPath().equals("/dir2"));
        Assert.assertTrue(ce5.getCtime() > 0);
        Assert.assertTrue(ce5.getReplication() == 0);
        Assert.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
        LOG.info(ce5.toString());
        // Batch 15: METADATA/XATTRS from setXAttr (xAttrsRemoved == false).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue5.getPath().equals("/file5"));
        Assert.assertTrue(mue5.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.XATTRS);
        Assert.assertTrue(mue5.getxAttrs().size() == 1);
        Assert.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
        Assert.assertTrue(!mue5.isxAttrsRemoved());
        LOG.info(mue5.toString());
        // Batch 16: METADATA/XATTRS from removeXAttr (xAttrsRemoved == true).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue6.getPath().equals("/file5"));
        Assert.assertTrue(mue6.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.XATTRS);
        Assert.assertTrue(mue6.getxAttrs().size() == 1);
        Assert.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
        Assert.assertTrue(mue6.isxAttrsRemoved());
        LOG.info(mue6.toString());
        // Batch 17: METADATA/ACLS from setAcl (ACL entries present).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue7.getPath().equals("/file5"));
        Assert.assertTrue(mue7.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.ACLS);
        Assert.assertTrue(mue7.getAcls().contains(AclEntry.parseAclEntry("user::rwx", true)));
        LOG.info(mue7.toString());
        // Batch 18: METADATA/ACLS from removeAcl (ACL list is null).
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
        Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
        Assert.assertTrue(mue8.getPath().equals("/file5"));
        Assert.assertTrue(mue8.getMetadataType() == Event.MetadataUpdateEvent.MetadataType.ACLS);
        Assert.assertTrue(mue8.getAcls() == null);
        LOG.info(mue8.toString());
        // Batch 19: RENAME /file5 into the directory -> /dir/file5.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
        Event.RenameEvent re3 = (Event.RenameEvent) batch.getEvents()[0];
        Assert.assertTrue(re3.getDstPath().equals("/dir/file5"));
        Assert.assertTrue(re3.getSrcPath().equals("/file5"));
        Assert.assertTrue(re3.getTimestamp() > 0);
        LOG.info(re3.toString());
        // Batch 20: TRUNCATE of /truncate_file down to one block.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.TRUNCATE);
        Event.TruncateEvent et = ((Event.TruncateEvent) batch.getEvents()[0]);
        Assert.assertTrue(et.getPath().equals("/truncate_file"));
        Assert.assertTrue(et.getFileSize() == BLOCK_SIZE);
        Assert.assertTrue(et.getTimestamp() > 0);
        LOG.info(et.toString());
        Assert.assertTrue(et.toString().startsWith("TruncateEvent [path="));
        // Batch 21: CREATE of /file_ec_test1 with overwrite=false; the
        // erasure-coded flag is present but false for this replicated file.
        batch = waitForNextEvents(eis);
        Assert.assertEquals(1, batch.getEvents().length);
        txid = checkTxid(batch, txid);
        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
        ce = (Event.CreateEvent) batch.getEvents()[0];
        Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
        Assert.assertTrue(ce.getPath().equals("/file_ec_test1"));
        Assert.assertTrue(ce.getCtime() > 0);
        Assert.assertTrue(ce.getReplication() > 0);
        Assert.assertTrue(ce.getSymlinkTarget() == null);
        Assert.assertFalse(ce.getOverwrite());
        Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
        Assert.assertTrue(ce.isErasureCoded().isPresent());
        Assert.assertFalse(ce.isErasureCoded().get());
        LOG.info(ce.toString());
        Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
        // Stream fully drained: no more events, and the backlog estimate is
        // the same value captured right after the first batch.
        Assert.assertTrue(eis.poll() == null);
        Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
    } finally {
        cluster.shutdown();
    }
}
175361.582385hadoop
public void testHumanPrinterAll() throws Exception {
    JobHistoryParser.JobInfo job = createJobInfo();
    HumanReadableHistoryViewerPrinter printer = new HumanReadableHistoryViewerPrinter(job, true, "http://", TimeZone.getTimeZone("GMT"));
    String outStr = run(printer);
    if (System.getProperty("java.version").startsWith("1.7")) {
        assertEqualLines("\n" + "Hadoop job: job_1317928501754_0001\n" + "=====================================\n" + "User: rkanter\n" + "JobName: my job\n" + "JobConf: /tmp/job.xml\n" + "Submitted At: 6-Oct-2011 19:15:01\n" + "Launched At: 6-Oct-2011 19:15:02 (1sec)\n" + "Finished At: 6-Oct-2011 19:15:16 (14sec)\n" + "Status: SUCCEEDED\n" + "Counters: \n" + "\n" + "|Group Name                    |Counter name                  |Map Value |Reduce Value|Total Value|\n" + "---------------------------------------------------------------------------------------" + LINE_SEPARATOR + "|group1                        |counter1                      |5         |5         |5         " + LINE_SEPARATOR + "|group1                        |counter2                      |10        |10        |10        " + LINE_SEPARATOR + "|group2                        |counter1                      |15        |15        |15        \n" + "\n" + "=====================================" + LINE_SEPARATOR + "\n" + "Task Summary\n" + "============================\n" + "Kind\tTotal\tSuccessful\tFailed\tKilled\tStartTime\tFinishTime\n" + "\n" + "Setup\t1\t1\t\t0\t0\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\n" + "Map\t6\t5\t\t1\t0\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:16 (12sec)\n" + "Reduce\t1\t1\t\t0\t0\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\n" + "Cleanup\t1\t1\t\t0\t0\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\n" + "============================\n" + LINE_SEPARATOR + "\n" + "Analysis" + LINE_SEPARATOR + "=========" + LINE_SEPARATOR + "\n" + "Time taken by best performing map task task_1317928501754_0001_m_000003: 3sec\n" + "Average time taken by map tasks: 5sec\n" + "Worse performing map tasks: \n" + "TaskId\t\tTimetaken" + LINE_SEPARATOR + "task_1317928501754_0001_m_000007 7sec" + LINE_SEPARATOR + "task_1317928501754_0001_m_000006 6sec" + LINE_SEPARATOR + "task_1317928501754_0001_m_000005 5sec" + LINE_SEPARATOR + "task_1317928501754_0001_m_000004 4sec" + LINE_SEPARATOR + 
"task_1317928501754_0001_m_000003 3sec" + LINE_SEPARATOR + "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)" + LINE_SEPARATOR + "\n" + "Time taken by best performing shuffle task task_1317928501754_0001_r_000008: 8sec\n" + "Average time taken by shuffle tasks: 8sec\n" + "Worse performing shuffle tasks: \n" + "TaskId\t\tTimetaken" + LINE_SEPARATOR + "task_1317928501754_0001_r_000008 8sec" + LINE_SEPARATOR + "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + LINE_SEPARATOR + "\n" + "Time taken by best performing reduce task task_1317928501754_0001_r_000008: 0sec\n" + "Average time taken by reduce tasks: 0sec\n" + "Worse performing reduce tasks: \n" + "TaskId\t\tTimetaken" + LINE_SEPARATOR + "task_1317928501754_0001_r_000008 0sec" + LINE_SEPARATOR + "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + LINE_SEPARATOR + "=========" + LINE_SEPARATOR + "\n" + "FAILED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_s_000001\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000006\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\t\t\n" + LINE_SEPARATOR + 
"\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000005\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\t\t\n" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000004\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\t\t\n" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000003\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\t\t\n" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000007\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\t\t\n" + LINE_SEPARATOR + "\n" + "SUCCEEDED REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_r_000008\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_CLEANUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_c_000009\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\t" + LINE_SEPARATOR + "\n" + "JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================" + 
LINE_SEPARATOR + "attempt_1317928501754_0001_s_000001_1\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_s_000001_1" + LINE_SEPARATOR + "\n" + "MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================\n" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000002_1\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000002_1\n" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000006_1\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000006_1\n" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000005_1\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000005_1\n" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000004_1\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000004_1\n" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000003_1\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000003_1\n" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000007_1\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000007_1\n" + LINE_SEPARATOR + "\n" + "REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tShuffleFinished\tSortFinished\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================" + LINE_SEPARATOR + "attempt_1317928501754_0001_r_000008_1\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t6-Oct-2011 19:15:18 (0sec)6-Oct-2011 19:15:18 
(8sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_r_000008_1" + LINE_SEPARATOR + "\n" + "JOB_CLEANUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================" + LINE_SEPARATOR + "attempt_1317928501754_0001_c_000009_1\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_c_000009_1" + LINE_SEPARATOR + "\n" + "FAILED task attempts by nodes\n" + "Hostname\tFailedTasks\n" + "===============================" + LINE_SEPARATOR + "localhost\ttask_1317928501754_0001_m_000002, " + LINE_SEPARATOR, outStr);
    } else {
        assertEqualLines("\n" + "Hadoop job: job_1317928501754_0001\n" + "=====================================\n" + "User: rkanter\n" + "JobName: my job\n" + "JobConf: /tmp/job.xml\n" + "Submitted At: 6-Oct-2011 19:15:01\n" + "Launched At: 6-Oct-2011 19:15:02 (1sec)\n" + "Finished At: 6-Oct-2011 19:15:16 (14sec)\n" + "Status: SUCCEEDED\n" + "Counters: \n" + "\n" + "|Group Name                    |Counter name                  |Map Value |Reduce Value|Total Value|\n" + "---------------------------------------------------------------------------------------" + LINE_SEPARATOR + "|group1                        |counter1                      |5         |5         |5         " + LINE_SEPARATOR + "|group1                        |counter2                      |10        |10        |10        " + LINE_SEPARATOR + "|group2                        |counter1                      |15        |15        |15        \n" + "\n" + "=====================================" + LINE_SEPARATOR + "\n" + "Task Summary\n" + "============================\n" + "Kind\tTotal\tSuccessful\tFailed\tKilled\tStartTime\tFinishTime\n" + "\n" + "Setup\t1\t1\t\t0\t0\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\n" + "Map\t6\t5\t\t1\t0\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:16 (12sec)\n" + "Reduce\t1\t1\t\t0\t0\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\n" + "Cleanup\t1\t1\t\t0\t0\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\n" + "============================\n" + LINE_SEPARATOR + "\n" + "Analysis" + LINE_SEPARATOR + "=========" + LINE_SEPARATOR + "\n" + "Time taken by best performing map task task_1317928501754_0001_m_000003: 3sec\n" + "Average time taken by map tasks: 5sec\n" + "Worse performing map tasks: \n" + "TaskId\t\tTimetaken" + LINE_SEPARATOR + "task_1317928501754_0001_m_000007 7sec" + LINE_SEPARATOR + "task_1317928501754_0001_m_000006 6sec" + LINE_SEPARATOR + "task_1317928501754_0001_m_000005 5sec" + LINE_SEPARATOR + "task_1317928501754_0001_m_000004 4sec" + LINE_SEPARATOR + 
"task_1317928501754_0001_m_000003 3sec" + LINE_SEPARATOR + "The last map task task_1317928501754_0001_m_000007 finished at (relative to the Job launch time): 6-Oct-2011 19:15:16 (14sec)" + LINE_SEPARATOR + "\n" + "Time taken by best performing shuffle task task_1317928501754_0001_r_000008: 8sec\n" + "Average time taken by shuffle tasks: 8sec\n" + "Worse performing shuffle tasks: \n" + "TaskId\t\tTimetaken" + LINE_SEPARATOR + "task_1317928501754_0001_r_000008 8sec" + LINE_SEPARATOR + "The last shuffle task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + LINE_SEPARATOR + "\n" + "Time taken by best performing reduce task task_1317928501754_0001_r_000008: 0sec\n" + "Average time taken by reduce tasks: 0sec\n" + "Worse performing reduce tasks: \n" + "TaskId\t\tTimetaken" + LINE_SEPARATOR + "task_1317928501754_0001_r_000008 0sec" + LINE_SEPARATOR + "The last reduce task task_1317928501754_0001_r_000008 finished at (relative to the Job launch time): 6-Oct-2011 19:15:18 (16sec)" + LINE_SEPARATOR + "=========" + LINE_SEPARATOR + "\n" + "FAILED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000002\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\t\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_s_000001\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000007\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\t\t" + LINE_SEPARATOR + 
"\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000006\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\t\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000005\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\t\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000004\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\t\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\tInputSplits\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_m_000003\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\t\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_r_000008\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t" + LINE_SEPARATOR + "\n" + "SUCCEEDED JOB_CLEANUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tError\n" + "====================================================" + LINE_SEPARATOR + "task_1317928501754_0001_c_000009\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\t" + LINE_SEPARATOR + "\n" + "JOB_SETUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================" + LINE_SEPARATOR + 
"attempt_1317928501754_0001_s_000001_1\t6-Oct-2011 19:15:03\t6-Oct-2011 19:15:04 (1sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_s_000001_1" + LINE_SEPARATOR + "\n" + "MAP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000007_1\t6-Oct-2011 19:15:09\t6-Oct-2011 19:15:16 (7sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000007_1" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000002_1\t6-Oct-2011 19:15:04\t6-Oct-2011 19:15:06 (2sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000002_1" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000006_1\t6-Oct-2011 19:15:08\t6-Oct-2011 19:15:14 (6sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000006_1" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000005_1\t6-Oct-2011 19:15:07\t6-Oct-2011 19:15:12 (5sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000005_1" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000004_1\t6-Oct-2011 19:15:06\t6-Oct-2011 19:15:10 (4sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000004_1" + LINE_SEPARATOR + "attempt_1317928501754_0001_m_000003_1\t6-Oct-2011 19:15:05\t6-Oct-2011 19:15:08 (3sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000003_1" + LINE_SEPARATOR + "\n" + "REDUCE task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tShuffleFinished\tSortFinished\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================" + LINE_SEPARATOR + "attempt_1317928501754_0001_r_000008_1\t6-Oct-2011 19:15:10\t6-Oct-2011 19:15:18 (8sec)\t6-Oct-2011 19:15:18 (0sec)6-Oct-2011 19:15:18 (8sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_r_000008_1" + LINE_SEPARATOR + "\n" 
+ "JOB_CLEANUP task list for job_1317928501754_0001\n" + "TaskId\t\tStartTime\tFinishTime\tHostName\tError\tTaskLogs\n" + "====================================================" + LINE_SEPARATOR + "attempt_1317928501754_0001_c_000009_1\t6-Oct-2011 19:15:11\t6-Oct-2011 19:15:20 (9sec)\tlocalhost\thttp://t:1234/tasklog?attemptid=attempt_1317928501754_0001_c_000009_1" + LINE_SEPARATOR + "\n" + "FAILED task attempts by nodes\n" + "Hostname\tFailedTasks\n" + "===============================" + LINE_SEPARATOR + "localhost\ttask_1317928501754_0001_m_000002, " + LINE_SEPARATOR, outStr);
    }
}
175479.431373hadoop
/**
 * Verifies the full ("all") JSON rendering of JSONHistoryViewerPrinter
 * against a golden document: job metadata, aggregated counters, the
 * per-kind task summary, and one entry per task/attempt of the synthetic
 * job job_1317928501754_0001 produced by createJobInfo().
 */
public void testJSONPrinterAll() throws Exception {
    // Shared synthetic fixture used by the sibling printer tests.
    JobHistoryParser.JobInfo job = createJobInfo();
    // Second ctor arg presumably enables the per-task/per-attempt dump (the
    // test name says "All") -- TODO confirm against JSONHistoryViewerPrinter;
    // "http://" is the scheme used when building task-log URLs in the output.
    JSONHistoryViewerPrinter printer = new JSONHistoryViewerPrinter(job, true, "http://");
    String outStr = run(printer);
    // NON_EXTENSIBLE: actual output must contain exactly these members
    // (field order is irrelevant; any extra field fails the comparison).
    JSONAssert.assertEquals("{\n" + "    \"counters\": {\n" + "        \"group1\": [\n" + "            {\n" + "                \"counterName\": \"counter1\",\n" + "                \"mapValue\": 5,\n" + "                \"reduceValue\": 5,\n" + "                \"totalValue\": 5\n" + "            },\n" + "            {\n" + "                \"counterName\": \"counter2\",\n" + "                \"mapValue\": 10,\n" + "                \"reduceValue\": 10,\n" + "                \"totalValue\": 10\n" + "            }\n" + "        ],\n" + "        \"group2\": [\n" + "            {\n" + "                \"counterName\": \"counter1\",\n" + "                \"mapValue\": 15,\n" + "                \"reduceValue\": 15,\n" + "                \"totalValue\": 15\n" + "            }\n" + "        ]\n" + "    },\n" + "    \"finishedAt\": 1317928516754,\n" + "    \"hadoopJob\": \"job_1317928501754_0001\",\n" + "    \"jobConf\": \"/tmp/job.xml\",\n" + "    \"jobName\": \"my job\",\n" + "    \"launchedAt\": 1317928502754,\n" + "    \"status\": \"SUCCEEDED\",\n" + "    \"submittedAt\": 1317928501754,\n" + "    \"taskSummary\": {\n" + "        \"cleanup\": {\n" + "            \"failed\": 0,\n" + "            \"finishTime\": 1317928520754,\n" + "            \"killed\": 0,\n" + "            \"startTime\": 1317928511754,\n" + "            \"successful\": 1,\n" + "            \"total\": 1\n" + "        },\n" + "        \"map\": {\n" + "            \"failed\": 1,\n" + "            \"finishTime\": 1317928516754,\n" + "            \"killed\": 0,\n" + "            \"startTime\": 1317928504754,\n" + "            \"successful\": 5,\n" + "            \"total\": 6\n" + "        },\n" + "        \"reduce\": {\n" + "            \"failed\": 0,\n" + "            \"finishTime\": 1317928518754,\n" + "            \"killed\": 0,\n" + "            \"startTime\": 1317928510754,\n" + "            \"successful\": 1,\n" + "            \"total\": 1\n" + "        },\n" + "        \"setup\": {\n" + "            
\"failed\": 0,\n" + "            \"finishTime\": 1317928504754,\n" + "            \"killed\": 0,\n" + "            \"startTime\": 1317928503754,\n" + "            \"successful\": 1,\n" + "            \"total\": 1\n" + "        }\n" + "    },\n" + "    \"tasks\": [\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_m_000002_1\",\n" + "                \"finishTime\": 1317928506754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928504754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000002_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928506754,\n" + "            \"inputSplits\": \"\",\n" + "            \"startTime\": 1317928504754,\n" + "            \"status\": \"FAILED\",\n" + "            \"taskId\": \"task_1317928501754_0001_m_000002\",\n" + "            \"type\": \"MAP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_s_000001_1\",\n" + "                \"finishTime\": 1317928504754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928503754,\n" + "                \"taskLogs\": 
\"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_s_000001_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928504754,\n" + "            \"startTime\": 1317928503754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_s_000001\",\n" + "            \"type\": \"JOB_SETUP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_m_000006_1\",\n" + "                \"finishTime\": 1317928514754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928508754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000006_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "            
            \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928514754,\n" + "            \"inputSplits\": \"\",\n" + "            \"startTime\": 1317928508754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_m_000006\",\n" + "            \"type\": \"MAP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_m_000005_1\",\n" + "                \"finishTime\": 1317928512754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928507754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000005_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928512754,\n" + "            \"inputSplits\": \"\",\n" + "            \"startTime\": 1317928507754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_m_000005\",\n" + "            \"type\": \"MAP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_m_000004_1\",\n" + "                \"finishTime\": 1317928510754,\n" + "                
\"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928506754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000004_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928510754,\n" + "            \"inputSplits\": \"\",\n" + "            \"startTime\": 1317928506754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_m_000004\",\n" + "            \"type\": \"MAP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_m_000003_1\",\n" + "                \"finishTime\": 1317928508754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928505754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000003_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "          
      ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928508754,\n" + "            \"inputSplits\": \"\",\n" + "            \"startTime\": 1317928505754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_m_000003\",\n" + "            \"type\": \"MAP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_c_000009_1\",\n" + "                \"finishTime\": 1317928520754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928511754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_c_000009_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928520754,\n" + "            \"startTime\": 1317928511754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_c_000009\",\n" + "            \"type\": \"JOB_CLEANUP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": 
\"attempt_1317928501754_0001_m_000007_1\",\n" + "                \"finishTime\": 1317928516754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"startTime\": 1317928509754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_m_000007_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928516754,\n" + "            \"inputSplits\": \"\",\n" + "            \"startTime\": 1317928509754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_m_000007\",\n" + "            \"type\": \"MAP\"\n" + "        },\n" + "        {\n" + "            \"attempts\": {\n" + "                \"attemptId\": \"attempt_1317928501754_0001_r_000008_1\",\n" + "                \"finishTime\": 1317928518754,\n" + "                \"hostName\": \"localhost\",\n" + "                \"shuffleFinished\": 1317928518754,\n" + "                \"sortFinished\": 1317928518754,\n" + "                \"startTime\": 1317928510754,\n" + "                \"taskLogs\": \"http://t:1234/tasklog?attemptid=attempt_1317928501754_0001_r_000008_1\"\n" + "            },\n" + "            \"counters\": {\n" + "                \"group1\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "            
            \"value\": 5\n" + "                    },\n" + "                    {\n" + "                        \"counterName\": \"counter2\",\n" + "                        \"value\": 10\n" + "                    }\n" + "                ],\n" + "                \"group2\": [\n" + "                    {\n" + "                        \"counterName\": \"counter1\",\n" + "                        \"value\": 15\n" + "                    }\n" + "                ]\n" + "            },\n" + "            \"finishTime\": 1317928518754,\n" + "            \"startTime\": 1317928510754,\n" + "            \"status\": \"SUCCEEDED\",\n" + "            \"taskId\": \"task_1317928501754_0001_r_000008\",\n" + "            \"type\": \"REDUCE\"\n" + "        }\n" + "    ],\n" + "    \"user\": \"rkanter\"\n" + "}\n", outStr, JSONCompareMode.NON_EXTENSIBLE);
}
174847.6250200hadoop
/**
 * Parses command line options and populates the client's configuration
 * fields.
 *
 * @param args command line arguments supplied by the user
 * @return {@code true} when initialization succeeded and the client should
 *         run; {@code false} when only usage was printed ({@code -help})
 * @throws ParseException on a malformed command line
 * @throws IllegalArgumentException when a required option is missing or an
 *         option value is invalid (no jar, no shell command, bad container
 *         type, bad flow run id, container count below 1, ...)
 */
public boolean init(String[] args) throws ParseException {
    // Fail fast with the intended message; previously this check ran only
    // after GnuParser.parse(), so parsing problems could surface first.
    if (args.length == 0) {
        throw new IllegalArgumentException("No args specified for client to initialize");
    }
    CommandLine cliParser = new GnuParser().parse(opts, args);
    // Apply a custom log4j configuration first so subsequent logging honors
    // it; a broken properties file is only warned about, never fatal.
    if (cliParser.hasOption("log_properties")) {
        String log4jPath = cliParser.getOptionValue("log_properties");
        try {
            Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
        } catch (Exception e) {
            LOG.warn("Can not set up custom log4j properties. " + e);
        }
    }
    if (cliParser.hasOption("rolling_log_pattern")) {
        rollingFilesPattern = cliParser.getOptionValue("rolling_log_pattern");
    }
    if (cliParser.hasOption("help")) {
        printUsage();
        return false;
    }
    if (cliParser.hasOption("debug")) {
        debugFlag = true;
    }
    if (cliParser.hasOption("keep_containers_across_application_attempts")) {
        LOG.info("keep_containers_across_application_attempts");
        keepContainers = true;
    }
    if (cliParser.hasOption("placement_spec")) {
        placementSpec = cliParser.getOptionValue("placement_spec");
        // Parse eagerly so an invalid spec fails init rather than submission.
        PlacementSpec.parse(this.placementSpec);
    }
    // Application-master settings; -1 for memory/vcores means "use default".
    appName = cliParser.getOptionValue("appname", "DistributedShell");
    amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
    amQueue = cliParser.getOptionValue("queue", "default");
    amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "-1"));
    amVCores = Integer.parseInt(cliParser.getOptionValue("master_vcores", "-1"));
    if (cliParser.hasOption("master_resources")) {
        // master_resources may override memory/vcores; any other resource
        // kind is carried through as a custom AM resource.
        Map<String, Long> masterResources = parseResourcesString(cliParser.getOptionValue("master_resources"));
        for (Map.Entry<String, Long> entry : masterResources.entrySet()) {
            if (entry.getKey().equals(ResourceInformation.MEMORY_URI)) {
                amMemory = entry.getValue();
            } else if (entry.getKey().equals(ResourceInformation.VCORES_URI)) {
                amVCores = entry.getValue().intValue();
            } else {
                amResources.put(entry.getKey(), entry.getValue());
            }
        }
    }
    amResourceProfile = cliParser.getOptionValue("master_resource_profile", "");
    if (!cliParser.hasOption("jar")) {
        throw new IllegalArgumentException("No jar file specified for application master");
    }
    appMasterJar = cliParser.getOptionValue("jar");
    // Exactly one of shell_command / shell_script must be supplied.
    if (!cliParser.hasOption("shell_command") && !cliParser.hasOption("shell_script")) {
        throw new IllegalArgumentException("No shell command or shell script specified to be executed by application master");
    } else if (cliParser.hasOption("shell_command") && cliParser.hasOption("shell_script")) {
        throw new IllegalArgumentException("Can not specify shell_command option " + "and shell_script option at the same time");
    } else if (cliParser.hasOption("shell_command")) {
        shellCommand = cliParser.getOptionValue("shell_command");
    } else {
        shellScriptPath = cliParser.getOptionValue("shell_script");
    }
    if (cliParser.hasOption("shell_args")) {
        shellArgs = cliParser.getOptionValues("shell_args");
    }
    if (cliParser.hasOption("shell_env")) {
        // Each entry is KEY=VALUE; an entry without '=' maps to an empty
        // value, as does a trailing '=' with nothing after it.
        String[] envs = cliParser.getOptionValues("shell_env");
        for (String env : envs) {
            env = env.trim();
            int index = env.indexOf('=');
            if (index == -1) {
                shellEnv.put(env, "");
                continue;
            }
            String key = env.substring(0, index);
            String val = "";
            if (index < (env.length() - 1)) {
                val = env.substring(index + 1);
            }
            shellEnv.put(key, val);
        }
    }
    shellCmdPriority = Integer.parseInt(cliParser.getOptionValue("shell_cmd_priority", "0"));
    if (cliParser.hasOption("container_type")) {
        // Validate against the enum first so the error names the bad value
        // instead of surfacing a bare valueOf() IllegalArgumentException.
        String containerTypeStr = cliParser.getOptionValue("container_type");
        if (Arrays.stream(ExecutionType.values()).noneMatch(executionType -> executionType.toString().equals(containerTypeStr))) {
            throw new IllegalArgumentException("Invalid container_type: " + containerTypeStr);
        }
        containerType = ExecutionType.valueOf(containerTypeStr);
    }
    if (cliParser.hasOption("promote_opportunistic_after_start")) {
        autoPromoteContainers = true;
    }
    if (cliParser.hasOption("enforce_execution_type")) {
        enforceExecType = true;
    }
    // Task-container settings mirror the AM settings above.
    containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "-1"));
    containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "-1"));
    if (cliParser.hasOption("container_resources")) {
        Map<String, Long> resources = parseResourcesString(cliParser.getOptionValue("container_resources"));
        for (Map.Entry<String, Long> entry : resources.entrySet()) {
            if (entry.getKey().equals(ResourceInformation.MEMORY_URI)) {
                containerMemory = entry.getValue();
            } else if (entry.getKey().equals(ResourceInformation.VCORES_URI)) {
                containerVirtualCores = entry.getValue().intValue();
            } else {
                containerResources.put(entry.getKey(), entry.getValue());
            }
        }
    }
    containerResourceProfile = cliParser.getOptionValue("container_resource_profile", "");
    numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1"));
    if (numContainers < 1) {
        throw new IllegalArgumentException("Invalid no. of containers specified," + " exiting. Specified numContainer=" + numContainers);
    }
    nodeLabelExpression = cliParser.getOptionValue("node_label_expression", null);
    clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000"));
    attemptFailuresValidityInterval = Long.parseLong(cliParser.getOptionValue("attempt_failures_validity_interval", "-1"));
    log4jPropFile = cliParser.getOptionValue("log_properties", "");
    // Timeline-domain ACLs are only consulted when a domain id is supplied.
    if (cliParser.hasOption("domain")) {
        domainId = cliParser.getOptionValue("domain");
        toCreateDomain = cliParser.hasOption("create");
        if (cliParser.hasOption("view_acls")) {
            viewACLs = cliParser.getOptionValue("view_acls");
        }
        if (cliParser.hasOption("modify_acls")) {
            modifyACLs = cliParser.getOptionValue("modify_acls");
        }
    }
    // Container-retry options are forwarded verbatim on the AM command line.
    if (cliParser.hasOption("container_retry_policy")) {
        containerRetryOptions.add("--container_retry_policy " + cliParser.getOptionValue("container_retry_policy"));
    }
    if (cliParser.hasOption("container_retry_error_codes")) {
        containerRetryOptions.add("--container_retry_error_codes " + cliParser.getOptionValue("container_retry_error_codes"));
    }
    if (cliParser.hasOption("container_max_retries")) {
        containerRetryOptions.add("--container_max_retries " + cliParser.getOptionValue("container_max_retries"));
    }
    if (cliParser.hasOption("container_retry_interval")) {
        containerRetryOptions.add("--container_retry_interval " + cliParser.getOptionValue("container_retry_interval"));
    }
    if (cliParser.hasOption("container_failures_validity_interval")) {
        containerRetryOptions.add("--container_failures_validity_interval " + cliParser.getOptionValue("container_failures_validity_interval"));
    }
    if (cliParser.hasOption("flow_name")) {
        flowName = cliParser.getOptionValue("flow_name");
    }
    if (cliParser.hasOption("flow_version")) {
        flowVersion = cliParser.getOptionValue("flow_version");
    }
    if (cliParser.hasOption("flow_run_id")) {
        try {
            flowRunId = Long.parseLong(cliParser.getOptionValue("flow_run_id"));
        } catch (NumberFormatException e) {
            // Preserve the cause so the bad value is visible in the trace.
            throw new IllegalArgumentException("Flow run is not a valid long value", e);
        }
    }
    if (cliParser.hasOption("docker_client_config")) {
        dockerClientConfig = cliParser.getOptionValue("docker_client_config");
    }
    if (cliParser.hasOption("application_tags")) {
        // Comma-separated tags; each is trimmed before being recorded.
        String applicationTagsStr = cliParser.getOptionValue("application_tags");
        String[] appTags = applicationTagsStr.split(",");
        for (String appTag : appTags) {
            this.applicationTags.add(appTag.trim());
        }
    }
    if (cliParser.hasOption("localize_files")) {
        // Split unconditionally (a comma-free value yields a one-element
        // array) and append to the existing list. The previous code assigned
        // Arrays.asList(...) for comma-separated input, which produced a
        // fixed-size list that throws UnsupportedOperationException on any
        // later add(), and behaved differently from the single-file branch.
        String filesStr = cliParser.getOptionValue("localize_files");
        for (String file : filesStr.split(",")) {
            filesToLocalize.add(file);
        }
    }
    return true;
}
174927.551185wildfly
/**
 * Processes a Jakarta RESTful Web Services (RESTEasy) WAR deployment.
 * <p>
 * Merges RESTEasy deployment data scanned from this unit and from its module
 * dependencies into the merged web metadata as RESTEasy context parameters
 * (scanned resources, providers, JNDI component resources, defaults for
 * entity-reference expansion, unwrapped exceptions and Jackson-over-JSON-B),
 * registers management resources, and synthesizes servlet and servlet-mapping
 * metadata for each discovered {@code Application} subclass — or a default
 * dispatcher servlet when no application class is present.
 * <p>
 * Returns early (no-op) when the unit is not a JAX-RS WAR deployment, or when
 * RESTEasy boot classes / a dispatcher are already configured by the user.
 *
 * @param phaseContext the current deployment phase context
 * @throws DeploymentUnitProcessingException if processing the unit fails
 */
public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    if (!JaxrsDeploymentMarker.isJaxrsDeployment(deploymentUnit)) {
        return;
    }
    if (!DeploymentTypeMarker.isType(DeploymentType.WAR, deploymentUnit)) {
        return;
    }
    // For sub-deployments, additional RESTEasy data is attached to the parent unit.
    final DeploymentUnit parent = deploymentUnit.getParent() == null ? deploymentUnit : deploymentUnit.getParent();
    final WarMetaData warMetaData = deploymentUnit.getAttachment(WarMetaData.ATTACHMENT_KEY);
    final JBossWebMetaData webdata = warMetaData.getMergedJBossWebMetaData();
    setConfigParameters(phaseContext, webdata);
    final ResteasyDeploymentData resteasy = deploymentUnit.getAttachment(JaxrsAttachments.RESTEASY_DEPLOYMENT_DATA);
    if (resteasy == null)
        return;
    final Module module = deploymentUnit.getAttachment(Attachments.MODULE);
    if (module != null) {
        // Register the deployment's class loader so RESTEasy picks the right
        // (MicroProfile or default) configuration factory for it.
        final CapabilityServiceSupport support = deploymentUnit.getAttachment(Attachments.CAPABILITY_SERVICE_SUPPORT);
        final WildFlyConfigurationFactory configurationFactory = WildFlyConfigurationFactory.getInstance();
        configurationFactory.register(module.getClassLoader(), useMicroProfileConfig(module, support));
    }
    // Entity-reference expansion defaults to "false" unless the user set it explicitly.
    final List<ParamValueMetaData> params = webdata.getContextParams();
    boolean entityExpandEnabled = false;
    if (params != null) {
        for (ParamValueMetaData param : params) {
            if (param.getParamName().equals(ResteasyContextParameters.RESTEASY_EXPAND_ENTITY_REFERENCES)) {
                entityExpandEnabled = true;
                break;
            }
        }
    }
    if (!entityExpandEnabled) {
        setContextParameter(webdata, ResteasyContextParameters.RESTEASY_EXPAND_ENTITY_REFERENCES, "false");
    }
    // Merge RESTEasy data contributed by modules this deployment depends on,
    // visiting each dependency identifier only once.
    final Map<ModuleIdentifier, ResteasyDeploymentData> attachmentMap = parent.getAttachment(JaxrsAttachments.ADDITIONAL_RESTEASY_DEPLOYMENT_DATA);
    final List<ResteasyDeploymentData> additionalData = new ArrayList<ResteasyDeploymentData>();
    final ModuleSpecification moduleSpec = deploymentUnit.getAttachment(Attachments.MODULE_SPECIFICATION);
    if (moduleSpec != null && attachmentMap != null) {
        final Set<ModuleIdentifier> identifiers = new HashSet<ModuleIdentifier>();
        for (ModuleDependency dep : moduleSpec.getAllDependencies()) {
            // Set.add returns false for duplicates, so each identifier is handled once.
            if (identifiers.add(dep.getIdentifier()) && attachmentMap.containsKey(dep.getIdentifier())) {
                additionalData.add(attachmentMap.get(dep.getIdentifier()));
            }
        }
        resteasy.merge(additionalData);
    }
    // Publish the scanned class names to RESTEasy as comma-separated context parameters.
    if (!resteasy.getScannedResourceClasses().isEmpty()) {
        String resources = String.join(",", resteasy.getScannedResourceClasses());
        JAXRS_LOGGER.debugf("Adding Jakarta RESTful Web Services resource classes: %s", resources);
        setContextParameter(webdata, ResteasyContextParameters.RESTEASY_SCANNED_RESOURCES, resources);
    }
    if (!resteasy.getScannedProviderClasses().isEmpty()) {
        String providers = String.join(",", resteasy.getScannedProviderClasses());
        JAXRS_LOGGER.debugf("Adding Jakarta RESTful Web Services provider classes: %s", providers);
        setContextParameter(webdata, ResteasyContextParameters.RESTEASY_SCANNED_PROVIDERS, providers);
    }
    if (!resteasy.getScannedJndiComponentResources().isEmpty()) {
        String jndiResources = String.join(",", resteasy.getScannedJndiComponentResources());
        JAXRS_LOGGER.debugf("Adding Jakarta RESTful Web Services jndi component resource classes: %s", jndiResources);
        setContextParameter(webdata, ResteasyContextParameters.RESTEASY_SCANNED_JNDI_RESOURCES, jndiResources);
    }
    if (!resteasy.isUnwrappedExceptionsParameterSet()) {
        setContextParameter(webdata, ResteasyContextParameters.RESTEASY_UNWRAPPED_EXCEPTIONS, "jakarta.ejb.EJBException");
    }
    // Prefer Jackson over JSON-B: explicit context param wins, then the system
    // property, then auto-detection based on Jackson annotations in the deployment.
    if (findContextParam(webdata, ResteasyContextParameters.RESTEASY_PREFER_JACKSON_OVER_JSONB) == null) {
        final String prop = WildFlySecurityManager.getPropertyPrivileged(ResteasyContextParameters.RESTEASY_PREFER_JACKSON_OVER_JSONB, null);
        if (prop != null) {
            setContextParameter(webdata, ResteasyContextParameters.RESTEASY_PREFER_JACKSON_OVER_JSONB, prop);
        } else {
            setContextParameter(webdata, ResteasyContextParameters.RESTEASY_PREFER_JACKSON_OVER_JSONB, Boolean.toString(hasJacksonAnnotations(deploymentUnit)));
        }
    }
    boolean managementAdded = false;
    if (!resteasy.getScannedApplicationClasses().isEmpty() || resteasy.hasBootClasses() || resteasy.isDispatcherCreated()) {
        addManagement(deploymentUnit, resteasy);
        managementAdded = true;
    }
    // Warn (trace) when server-side tracing was explicitly enabled for this deployment.
    final String value = webdata.getContextParams().stream().filter(contextValue -> "resteasy.server.tracing.type".equals(contextValue.getParamName())).map(ParamValueMetaData::getParamValue).findFirst().orElse(null);
    if (value != null && !"OFF".equals(value)) {
        JAXRS_LOGGER.tracingEnabled(deploymentUnit.getName());
    }
    // If the user configured RESTEasy themselves, do not synthesize servlets.
    if (resteasy.hasBootClasses() || resteasy.isDispatcherCreated())
        return;
    // Only application classes that carry @ApplicationPath or already have a
    // servlet mapping can actually be dispatched to.
    Set<Class<? extends Application>> applicationClassSet = new HashSet<>();
    for (Class<? extends Application> clazz : resteasy.getScannedApplicationClasses()) {
        if (clazz.isAnnotationPresent(ApplicationPath.class) || servletMappingsExist(webdata, clazz.getName())) {
            applicationClassSet.add(clazz);
        }
    }
    if (applicationClassSet.isEmpty()) {
        // No usable Application subclass: install the default RESTEasy dispatcher.
        JBossServletMetaData servlet = new JBossServletMetaData();
        servlet.setName(JAX_RS_SERVLET_NAME);
        servlet.setServletClass(HttpServlet30Dispatcher.class.getName());
        servlet.setAsyncSupported(true);
        addServlet(webdata, servlet);
        setServletMappingPrefix(webdata, JAX_RS_SERVLET_NAME, servlet);
    } else {
        for (Class<? extends Application> applicationClass : applicationClassSet) {
            String servletName = applicationClass.getName();
            JBossServletMetaData servlet = new JBossServletMetaData();
            servlet.setLoadOnStartup("0");
            servlet.setName(servletName);
            servlet.setServletClass(HttpServlet30Dispatcher.class.getName());
            servlet.setAsyncSupported(true);
            setServletInitParam(servlet, SERVLET_INIT_PARAM, applicationClass.getName());
            addServlet(webdata, servlet);
            if (!servletMappingsExist(webdata, servletName)) {
                // Derive the mapping from @ApplicationPath: normalize to a
                // leading "/" and append "/*" for the URL pattern.
                try {
                    List<String> patterns = new ArrayList<>();
                    String pathValue = URLDecoder.decode(applicationClass.getAnnotation(ApplicationPath.class).value().trim(), "UTF-8");
                    if (!pathValue.startsWith("/")) {
                        pathValue = "/" + pathValue;
                    }
                    String prefix = pathValue;
                    if (pathValue.endsWith("/")) {
                        pathValue += "*";
                    } else {
                        pathValue += "/*";
                    }
                    patterns.add(pathValue);
                    setServletInitParam(servlet, "resteasy.servlet.mapping.prefix", prefix);
                    ServletMappingMetaData mapping = new ServletMappingMetaData();
                    mapping.setServletName(servletName);
                    mapping.setUrlPatterns(patterns);
                    if (webdata.getServletMappings() == null) {
                        webdata.setServletMappings(new ArrayList<ServletMappingMetaData>());
                    }
                    webdata.getServletMappings().add(mapping);
                } catch (UnsupportedEncodingException e) {
                    // UTF-8 is guaranteed by the JLS; this should be unreachable.
                    throw new RuntimeException(e);
                }
            } else {
                setServletMappingPrefix(webdata, servletName, servlet);
            }
        }
    }
    // Management may still be needed when the default JAX-RS servlet was mapped
    // in web.xml rather than discovered above.
    if (!managementAdded && webdata.getServletMappings() != null) {
        for (ServletMappingMetaData servletMapMeta : webdata.getServletMappings()) {
            if (JAX_RS_SERVLET_NAME.equals(servletMapMeta.getServletName())) {
                addManagement(deploymentUnit, resteasy);
                break;
            }
        }
    }
    if (deploymentUnit.getParent() == null && (webdata.getServletMappings() == null || webdata.getServletMappings().isEmpty())) {
        JAXRS_LOGGER.noServletDeclaration(deploymentUnit.getName());
    }
}
188489.721289cassandra
/**
 * Verifies multi-column (tuple) slice restrictions against a table whose
 * clustering columns use mixed ASC/DESC ordering (b ASC, c DESC, d DESC, e ASC).
 */
public void testMixedOrderColumns4() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, e int, PRIMARY KEY (a, b, c, d, e)) WITH CLUSTERING ORDER BY (b ASC, c DESC, d DESC, e ASC)");
    // Fixture rows (b, c, d, e) for partition a = 0; duplicates simply overwrite.
    int[][] clusterings = {
        { 2, 0, -1, 0 }, { 2, 0, -1, 1 }, { 2, 0, 1, 1 }, { 2, -1, 1, 1 }, { 2, -3, 1, 1 },
        { 1, -1, 0, 0 }, { 1, -1, 1, 1 }, { 1, -1, 1, 0 }, { 1, 0, 1, -1 }, { 1, 0, 1, 1 },
        { 1, 0, 0, -1 }, { 1, 0, 0, 0 }, { 1, 0, 0, 1 }, { 1, 0, -1, -1 }, { 1, 1, 0, -1 },
        { 1, 1, 0, 0 }, { 1, 1, 0, -1 }, { 1, 1, 0, 0 }, { 1, 1, 0, 1 }, { 1, 1, -1, 0 },
        { 0, 0, 0, 0 }, { -1, 0, -1, 0 }, { -1, 0, 0, 0 }
    };
    for (int[] c : clusterings) {
        execute("INSERT INTO %s (a, b, c, d, e) VALUES (?, ?, ?, ?, ?)", 0, c[0], c[1], c[2], c[3]);
    }
    // Open slice on both ends over the full tuple.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e)<(?,?,?,?) AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, 1, 1, -1, 0, -1, -1), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0), row(0, 0, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) < (?,?,?,?) AND (b,c,d,e)>(?,?,?,?)", 0, 1, 0, 0, 0, 1, 0, -1, -1), row(0, 1, 0, 0, -1));
    // Inclusive upper bound includes the boundary row itself.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) <= (?,?,?,?) AND (b,c,d,e)>(?,?,?,?)", 0, 1, 0, 0, 0, 1, 0, -1, -1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e)<=(?,?,?,?) AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, 1, 1, -1, 0, -1, -1), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0), row(0, 0, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    // Mixed-length tuples: shorter upper bound, full lower bound.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c)<=(?,?) AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, -1, 0, -1, -1), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0), row(0, 0, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c)<(?,?) AND (b,c,d,e)>(?,?,?,?)", 0, 2, 0, -1, 0, -1, -1), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0), row(0, 0, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e)<=(?,?,?,?) AND (b)>=(?)", 0, 2, 0, 1, 1, -1), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0), row(0, 0, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e)<=(?,?,?,?) AND (b)>(?)", 0, 2, 0, 1, 1, -1), row(0, 0, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    // Single-ended slices over the full tuple.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) <= (?,?,?,?)", 0, 1, 0, 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0), row(0, 0, 0, 0, 0), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, -1, -1), row(0, 1, -1, 1, 0), row(0, 1, -1, 1, 1), row(0, 1, -1, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) > (?,?,?,?)", 0, 1, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, 1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) >= (?,?,?,?)", 0, 1, 0, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    // Three-column tuples.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d) >= (?,?,?)", 0, 1, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 1, 0, 0, -1), row(0, 1, 0, 0, 0), row(0, 1, 0, 0, 1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d) > (?,?,?)", 0, 1, 0, 0), row(0, 1, 1, 0, -1), row(0, 1, 1, 0, 0), row(0, 1, 1, 0, 1), row(0, 1, 1, -1, 0), row(0, 1, 0, 1, -1), row(0, 1, 0, 1, 1), row(0, 2, 0, 1, 1), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1), row(0, 2, -3, 1, 1));
    // Single-column tuples.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) < (?) ", 0, 0), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) <= (?) ", 0, -1), row(0, -1, 0, 0, 0), row(0, -1, 0, -1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b,c,d,e) < (?,?,?,?) and (b,c,d,e) > (?,?,?,?) ", 0, 2, 0, 0, 0, 2, -2, 0, 0), row(0, 2, 0, -1, 0), row(0, 2, 0, -1, 1), row(0, 2, -1, 1, 1));
}
188576.415279cassandra
/**
 * Populates the static test fixtures ({@code testKeyspaces}/{@code testTables}
 * and their human-readable counterparts) with deterministic per-table stats
 * used by the nodetool tablestats printer/sorter tests.
 * <p>
 * Note: the original per-keyspace aggregation loop called
 * {@code List.set(i, ks)} with the same reference obtained from
 * {@code get(i)} — a no-op; the aggregation is now done by a shared helper.
 */
public static void createTestVector() {
    StatsTable table1 = createStatsTableTemplate("keyspace1", "table1");
    StatsTable table2 = createStatsTableTemplate("keyspace1", "table2");
    StatsTable table3 = createStatsTableTemplate("keyspace1", "table3");
    StatsTable table4 = createStatsTableTemplate("keyspace2", "table4");
    StatsTable table5 = createStatsTableTemplate("keyspace2", "table5");
    StatsTable table6 = createStatsTableTemplate("keyspace3", "table6");
    // Slice cell/tombstone averages (NaN entries exercise NaN handling in sorts).
    table1.averageLiveCellsPerSliceLastFiveMinutes = 6D;
    table2.averageLiveCellsPerSliceLastFiveMinutes = 4.01D;
    table3.averageLiveCellsPerSliceLastFiveMinutes = 0D;
    table4.averageLiveCellsPerSliceLastFiveMinutes = Double.NaN;
    table5.averageLiveCellsPerSliceLastFiveMinutes = 4D;
    table6.averageLiveCellsPerSliceLastFiveMinutes = 5D;
    table1.averageTombstonesPerSliceLastFiveMinutes = 5D;
    table2.averageTombstonesPerSliceLastFiveMinutes = 4.001D;
    table3.averageTombstonesPerSliceLastFiveMinutes = Double.NaN;
    table4.averageTombstonesPerSliceLastFiveMinutes = 0D;
    table5.averageTombstonesPerSliceLastFiveMinutes = 4.01D;
    table6.averageTombstonesPerSliceLastFiveMinutes = 6D;
    // Bloom filter stats.
    table1.bloomFilterFalsePositives = 30L;
    table2.bloomFilterFalsePositives = 600L;
    table3.bloomFilterFalsePositives = 20L;
    table4.bloomFilterFalsePositives = 500L;
    table5.bloomFilterFalsePositives = 10L;
    table6.bloomFilterFalsePositives = 400L;
    table1.bloomFilterFalseRatio = 0.40D;
    table2.bloomFilterFalseRatio = 0.01D;
    table3.bloomFilterFalseRatio = 0.50D;
    table4.bloomFilterFalseRatio = 0.02D;
    table5.bloomFilterFalseRatio = 0.60D;
    table6.bloomFilterFalseRatio = 0.03D;
    table1.bloomFilterSpaceUsed = "789";
    table2.bloomFilterSpaceUsed = "161718";
    table3.bloomFilterSpaceUsed = "456";
    table4.bloomFilterSpaceUsed = "131415";
    table5.bloomFilterSpaceUsed = "123";
    table6.bloomFilterSpaceUsed = "101112";
    // Compacted partition sizes.
    table1.compactedPartitionMaximumBytes = 60L;
    table2.compactedPartitionMaximumBytes = 30L;
    table3.compactedPartitionMaximumBytes = 50L;
    table4.compactedPartitionMaximumBytes = 20L;
    table5.compactedPartitionMaximumBytes = 40L;
    table6.compactedPartitionMaximumBytes = 20L;
    table1.compactedPartitionMeanBytes = 6L;
    table2.compactedPartitionMeanBytes = 4L;
    table3.compactedPartitionMeanBytes = 5L;
    table4.compactedPartitionMeanBytes = 4L;
    table5.compactedPartitionMeanBytes = 4L;
    table6.compactedPartitionMeanBytes = 3L;
    table1.compactedPartitionMinimumBytes = 2L;
    table2.compactedPartitionMinimumBytes = 4L;
    table3.compactedPartitionMinimumBytes = 2L;
    table4.compactedPartitionMinimumBytes = 5L;
    table5.compactedPartitionMinimumBytes = 3L;
    table6.compactedPartitionMinimumBytes = 6L;
    // Local read/write counts and latencies (NaN/0 exercise edge formatting).
    table1.localReadCount = 0L;
    table2.localReadCount = 1L;
    table3.localReadCount = 2L;
    table4.localReadCount = 3L;
    table5.localReadCount = 4L;
    table6.localReadCount = 5L;
    table1.localReadLatencyMs = 2D;
    table2.localReadLatencyMs = 3D;
    table3.localReadLatencyMs = 4D;
    table4.localReadLatencyMs = Double.NaN;
    table5.localReadLatencyMs = 0D;
    table6.localReadLatencyMs = 1D;
    table1.localWriteCount = 5L;
    table2.localWriteCount = 4L;
    table3.localWriteCount = 3L;
    table4.localWriteCount = 2L;
    table5.localWriteCount = 1L;
    table6.localWriteCount = 0L;
    table1.localWriteLatencyMs = 0.05D;
    table2.localWriteLatencyMs = 0D;
    table3.localWriteLatencyMs = Double.NaN;
    table4.localWriteLatencyMs = 2D;
    table5.localWriteLatencyMs = 1D;
    table6.localWriteLatencyMs = 0.5D;
    // Per-slice maxima.
    table1.maximumLiveCellsPerSliceLastFiveMinutes = 6L;
    table2.maximumLiveCellsPerSliceLastFiveMinutes = 5L;
    table3.maximumLiveCellsPerSliceLastFiveMinutes = 5L;
    table4.maximumLiveCellsPerSliceLastFiveMinutes = 3L;
    table5.maximumLiveCellsPerSliceLastFiveMinutes = 3L;
    table6.maximumLiveCellsPerSliceLastFiveMinutes = 2L;
    table1.maximumTombstonesPerSliceLastFiveMinutes = 1L;
    table2.maximumTombstonesPerSliceLastFiveMinutes = 2L;
    table3.maximumTombstonesPerSliceLastFiveMinutes = 3L;
    table4.maximumTombstonesPerSliceLastFiveMinutes = 3L;
    table5.maximumTombstonesPerSliceLastFiveMinutes = 5L;
    table6.maximumTombstonesPerSliceLastFiveMinutes = 6L;
    // Memtable stats.
    table1.memtableCellCount = 111L;
    table2.memtableCellCount = 22L;
    table3.memtableCellCount = 333333L;
    table4.memtableCellCount = 4L;
    table5.memtableCellCount = 55555L;
    table6.memtableCellCount = 6666L;
    table1.memtableDataSize = "0";
    table2.memtableDataSize = "900";
    table3.memtableDataSize = "1999";
    table4.memtableDataSize = "3000";
    table5.memtableDataSize = "20000";
    table6.memtableDataSize = "1000000";
    table1.memtableSwitchCount = 1L;
    table2.memtableSwitchCount = 22222L;
    table3.memtableSwitchCount = 3333L;
    table4.memtableSwitchCount = 444444L;
    table5.memtableSwitchCount = 5L;
    table6.memtableSwitchCount = 6L;
    // Partition estimates and pending flushes.
    table1.numberOfPartitionsEstimate = 111111L;
    table2.numberOfPartitionsEstimate = 22222L;
    table3.numberOfPartitionsEstimate = 3333L;
    table4.numberOfPartitionsEstimate = 444L;
    table5.numberOfPartitionsEstimate = 55L;
    table6.numberOfPartitionsEstimate = 6L;
    table1.pendingFlushes = 11111L;
    table2.pendingFlushes = 222222L;
    table3.pendingFlushes = 333L;
    table4.pendingFlushes = 4444L;
    table5.pendingFlushes = 5L;
    table6.pendingFlushes = 66L;
    table1.percentRepaired = 100.0D;
    table2.percentRepaired = 99.9D;
    table3.percentRepaired = 99.8D;
    table4.percentRepaired = 50.0D;
    table5.percentRepaired = 93.0D;
    table6.percentRepaired = 0.0D;
    // Space usage (string-valued in StatsTable).
    table1.spaceUsedBySnapshotsTotal = "1111";
    table2.spaceUsedBySnapshotsTotal = "222";
    table3.spaceUsedBySnapshotsTotal = "0";
    table4.spaceUsedBySnapshotsTotal = "44";
    table5.spaceUsedBySnapshotsTotal = "55555";
    table6.spaceUsedBySnapshotsTotal = "0";
    table1.spaceUsedLive = "0";
    table2.spaceUsedLive = "22";
    table3.spaceUsedLive = "0";
    table4.spaceUsedLive = "4444";
    table5.spaceUsedLive = "55555";
    table6.spaceUsedLive = "666666";
    table1.spaceUsedTotal = "9001";
    table2.spaceUsedTotal = "1024";
    table3.spaceUsedTotal = "512";
    table4.spaceUsedTotal = "256";
    table5.spaceUsedTotal = "64";
    table6.spaceUsedTotal = "0";
    table1.sstableCompressionRatio = 0.68D;
    table2.sstableCompressionRatio = 0.68D;
    table3.sstableCompressionRatio = 0.32D;
    table4.sstableCompressionRatio = 0.95D;
    table5.sstableCompressionRatio = 0.99D;
    table6.sstableCompressionRatio = 0.68D;
    table1.sstableCount = 60000;
    table2.sstableCount = 3000;
    table3.sstableCount = 50000;
    table4.sstableCount = 2000;
    table5.sstableCount = 40000;
    table6.sstableCount = 1000;
    table1.droppableTombstoneRatio = 0;
    table2.droppableTombstoneRatio = 0.222222;
    table3.droppableTombstoneRatio = 0.333333;
    table4.droppableTombstoneRatio = 0.444444;
    table5.droppableTombstoneRatio = 0.555555;
    table6.droppableTombstoneRatio = 0.666666;
    // Off-heap flags/values only on the even-numbered tables.
    table2.offHeapUsed = true;
    table4.offHeapUsed = true;
    table6.offHeapUsed = true;
    table2.memtableOffHeapUsed = true;
    table4.memtableOffHeapUsed = true;
    table6.memtableOffHeapUsed = true;
    table2.bloomFilterOffHeapUsed = true;
    table4.bloomFilterOffHeapUsed = true;
    table6.bloomFilterOffHeapUsed = true;
    table2.compressionMetadataOffHeapUsed = true;
    table4.compressionMetadataOffHeapUsed = true;
    table6.compressionMetadataOffHeapUsed = true;
    table2.indexSummaryOffHeapUsed = true;
    table4.indexSummaryOffHeapUsed = true;
    table6.indexSummaryOffHeapUsed = true;
    table2.offHeapMemoryUsedTotal = "314159367";
    table4.offHeapMemoryUsedTotal = "441213818";
    table6.offHeapMemoryUsedTotal = "162470810";
    table2.bloomFilterOffHeapMemoryUsed = "98";
    table4.bloomFilterOffHeapMemoryUsed = "299792458";
    table6.bloomFilterOffHeapMemoryUsed = "667408";
    table2.compressionMetadataOffHeapMemoryUsed = "3";
    table4.compressionMetadataOffHeapMemoryUsed = "2";
    table6.compressionMetadataOffHeapMemoryUsed = "1";
    table2.indexSummaryOffHeapMemoryUsed = "1";
    table4.indexSummaryOffHeapMemoryUsed = "2";
    table6.indexSummaryOffHeapMemoryUsed = "3";
    table2.memtableOffHeapMemoryUsed = "314159265";
    table4.memtableOffHeapMemoryUsed = "141421356";
    table6.memtableOffHeapMemoryUsed = "161803398";
    // TWCS durations: set, unset (null), and default-valued tables are all covered.
    table2.twcsDurationInMillis = 2000L;
    table4.twcsDurationInMillis = 1000L;
    table5.twcsDurationInMillis = null;
    // Group tables into keyspaces and aggregate per-keyspace counters.
    testKeyspaces = new ArrayList<>();
    StatsKeyspace keyspace1 = createStatsKeyspaceTemplate("keyspace1");
    StatsKeyspace keyspace2 = createStatsKeyspaceTemplate("keyspace2");
    StatsKeyspace keyspace3 = createStatsKeyspaceTemplate("keyspace3");
    keyspace1.tables.add(table1);
    keyspace1.tables.add(table2);
    keyspace1.tables.add(table3);
    keyspace2.tables.add(table4);
    keyspace2.tables.add(table5);
    keyspace3.tables.add(table6);
    testKeyspaces.add(keyspace1);
    testKeyspaces.add(keyspace2);
    testKeyspaces.add(keyspace3);
    aggregateKeyspaceStats(testKeyspaces);
    testTables = new ArrayList<>();
    testTables.add(table1);
    testTables.add(table2);
    testTables.add(table3);
    testTables.add(table4);
    testTables.add(table5);
    testTables.add(table6);
    // Human-readable variants only differ in the pre-formatted size strings.
    StatsTable humanReadableTable1 = createStatsTableTemplate("keyspace1", "table1");
    StatsTable humanReadableTable2 = createStatsTableTemplate("keyspace1", "table2");
    StatsTable humanReadableTable3 = createStatsTableTemplate("keyspace1", "table3");
    StatsTable humanReadableTable4 = createStatsTableTemplate("keyspace2", "table4");
    StatsTable humanReadableTable5 = createStatsTableTemplate("keyspace2", "table5");
    StatsTable humanReadableTable6 = createStatsTableTemplate("keyspace3", "table6");
    humanReadableTable1.spaceUsedTotal = "999 bytes";
    humanReadableTable2.spaceUsedTotal = "5 KiB";
    humanReadableTable3.spaceUsedTotal = "40 KiB";
    humanReadableTable4.spaceUsedTotal = "3 MiB";
    humanReadableTable5.spaceUsedTotal = "2 GiB";
    humanReadableTable6.spaceUsedTotal = "1 TiB";
    humanReadableTable1.memtableDataSize = "1.21 TiB";
    humanReadableTable2.memtableDataSize = "42 KiB";
    humanReadableTable3.memtableDataSize = "2.71 GiB";
    humanReadableTable4.memtableDataSize = "999 bytes";
    humanReadableTable5.memtableDataSize = "3.14 MiB";
    humanReadableTable6.memtableDataSize = "0 bytes";
    humanReadableKeyspaces = new ArrayList<>();
    StatsKeyspace humanReadableKeyspace1 = createStatsKeyspaceTemplate("keyspace1");
    StatsKeyspace humanReadableKeyspace2 = createStatsKeyspaceTemplate("keyspace2");
    StatsKeyspace humanReadableKeyspace3 = createStatsKeyspaceTemplate("keyspace3");
    humanReadableKeyspace1.tables.add(humanReadableTable1);
    humanReadableKeyspace1.tables.add(humanReadableTable2);
    humanReadableKeyspace1.tables.add(humanReadableTable3);
    humanReadableKeyspace2.tables.add(humanReadableTable4);
    humanReadableKeyspace2.tables.add(humanReadableTable5);
    humanReadableKeyspace3.tables.add(humanReadableTable6);
    humanReadableKeyspaces.add(humanReadableKeyspace1);
    humanReadableKeyspaces.add(humanReadableKeyspace2);
    humanReadableKeyspaces.add(humanReadableKeyspace3);
    aggregateKeyspaceStats(humanReadableKeyspaces);
    humanReadableTables = new ArrayList<>();
    humanReadableTables.add(humanReadableTable1);
    humanReadableTables.add(humanReadableTable2);
    humanReadableTables.add(humanReadableTable3);
    humanReadableTables.add(humanReadableTable4);
    humanReadableTables.add(humanReadableTable5);
    humanReadableTables.add(humanReadableTable6);
}

/**
 * Sums each keyspace's read count, write count, and pending flushes from its
 * tables. Mutates the keyspace objects in place; no list elements are replaced.
 */
private static void aggregateKeyspaceStats(List<StatsKeyspace> keyspaces) {
    for (StatsKeyspace ks : keyspaces) {
        for (StatsTable st : ks.tables) {
            ks.readCount += st.localReadCount;
            ks.writeCount += st.localWriteCount;
            // pendingFlushes on StatsTable is boxed/Object-typed; the cast
            // mirrors the original code's unboxing to long.
            ks.pendingFlushes += (long) st.pendingFlushes;
        }
    }
}
188844.9610243elasticsearch
/**
 * Verifies that {@code LeafDocLookup} keeps two independent factory caches:
 * {@code docFactoryCache}, populated by {@code get(name)} (which resolves field
 * data via the SEARCH operation in the lookup lambda below), and
 * {@code fieldFactoryCache}, populated by {@code getScriptField(name)} (which
 * resolves via the SCRIPT operation).
 *
 * Three mocked fields are exercised:
 * - "doc":          SEARCH and SCRIPT both resolve to the same doc-values-backed
 *                   field data, so both caches end up holding {@code docFactory}.
 * - "source":       SEARCH throws ("search cannot access source"), SCRIPT resolves
 *                   to source-fetcher field data, so only the field cache fills.
 * - "docAndSource": SEARCH resolves to doc-values-backed field data and SCRIPT to
 *                   source-fetcher field data, so the two caches hold different
 *                   factories for the same field name.
 *
 * Each phase repeats the access pattern in every order, asserting cache sizes and
 * contents after every call, with explicit cache clears between phases.
 * NOTE(review): despite the name, no threads are started here — "parallel" refers
 * to the two caches operating side by side; confirm against the production class.
 */
public void testParallelCache() {
    String nameDoc = "doc";
    String nameSource = "source";
    String nameDocAndSource = "docAndSource";
    // Mapped field types keyed by name; the lookup lambda dispatches on these.
    MappedFieldType docMappedFieldType = mock(MappedFieldType.class);
    MappedFieldType sourceMappedFieldType = mock(MappedFieldType.class);
    MappedFieldType docAndSourceMappedFieldType = mock(MappedFieldType.class);
    Map<String, MappedFieldType> namesToMappedFieldTypes = Map.of(nameDoc, docMappedFieldType, nameSource, sourceMappedFieldType, nameDocAndSource, docAndSourceMappedFieldType);
    // Field data mocks: plain IndexFieldData for the doc-values path,
    // SourceValueFetcherIndexFieldData for the source-fetcher path.
    IndexFieldData<?> docIndexFieldData = mock(IndexFieldData.class);
    SourceValueFetcherIndexFieldData<?> sourceIndexFieldData = mock(SourceValueFetcherIndexFieldData.class);
    IndexFieldData<?> docAndSourceDocIndexFieldData = mock(IndexFieldData.class);
    SourceValueFetcherIndexFieldData<?> docAndSourceSourceIndexFieldData = mock(SourceValueFetcherIndexFieldData.class);
    LeafFieldData docLeafFieldData = mock(LeafFieldData.class);
    LeafFieldData sourceLeafFieldData = mock(SourceValueFetcherIndexFieldData.SourceValueFetcherLeafFieldData.class);
    LeafFieldData docAndSourceDocLeafFieldData = mock(LeafFieldData.class);
    LeafFieldData docAndSourceSourceLeafFieldData = mock(SourceValueFetcherIndexFieldData.SourceValueFetcherLeafFieldData.class);
    DocValuesScriptFieldFactory docFactory = mock(DocValuesScriptFieldFactory.class);
    DocValuesScriptFieldFactory sourceFactory = mock(DocValuesScriptFieldFactory.class);
    DocValuesScriptFieldFactory docAndSourceDocFactory = mock(DocValuesScriptFieldFactory.class);
    DocValuesScriptFieldFactory docAndSourceSourceFactory = mock(DocValuesScriptFieldFactory.class);
    ScriptDocValues<?> docDocValues = mock(ScriptDocValues.class);
    Field<?> fieldDocValues = mock(Field.class);
    Field<?> fieldSourceValues = mock(Field.class);
    ScriptDocValues<?> docSourceAndDocValues = mock(ScriptDocValues.class);
    Field<?> fieldSourceAndDocValues = mock(Field.class);
    // Stub the chain: field data -> leaf field data -> factory -> values/field.
    doReturn(docLeafFieldData).when(docIndexFieldData).load(any());
    doReturn(docFactory).when(docLeafFieldData).getScriptFieldFactory(nameDoc);
    doReturn(docDocValues).when(docFactory).toScriptDocValues();
    doReturn(fieldDocValues).when(docFactory).toScriptField();
    doReturn(sourceLeafFieldData).when(sourceIndexFieldData).load(any());
    doReturn(sourceFactory).when(sourceLeafFieldData).getScriptFieldFactory(nameSource);
    doReturn(fieldSourceValues).when(sourceFactory).toScriptField();
    doReturn(docAndSourceDocLeafFieldData).when(docAndSourceDocIndexFieldData).load(any());
    doReturn(docAndSourceDocFactory).when(docAndSourceDocLeafFieldData).getScriptFieldFactory(nameDocAndSource);
    doReturn(docSourceAndDocValues).when(docAndSourceDocFactory).toScriptDocValues();
    doReturn(docAndSourceSourceLeafFieldData).when(docAndSourceSourceIndexFieldData).load(any());
    doReturn(docAndSourceSourceFactory).when(docAndSourceSourceLeafFieldData).getScriptFieldFactory(nameDocAndSource);
    doReturn(fieldSourceAndDocValues).when(docAndSourceSourceFactory).toScriptField();
    // The lookup lambda maps (field type, operation) to field data; unknown
    // combinations throw so an unexpected code path fails the test loudly.
    LeafDocLookup leafDocLookup = new LeafDocLookup(namesToMappedFieldTypes::get, (mappedFieldType, operation) -> {
        if (mappedFieldType.equals(docMappedFieldType)) {
            if (operation == SEARCH || operation == SCRIPT) {
                return docIndexFieldData;
            } else {
                throw new IllegalArgumentException("unknown operation [" + operation + "]");
            }
        } else if (mappedFieldType.equals(sourceMappedFieldType)) {
            if (operation == SEARCH) {
                throw new IllegalArgumentException("search cannot access source");
            } else if (operation == SCRIPT) {
                return sourceIndexFieldData;
            } else {
                throw new IllegalArgumentException("unknown operation [" + operation + "]");
            }
        } else if (mappedFieldType.equals(docAndSourceMappedFieldType)) {
            if (operation == SEARCH) {
                return docAndSourceDocIndexFieldData;
            } else if (operation == SCRIPT) {
                return docAndSourceSourceIndexFieldData;
            } else {
                throw new IllegalArgumentException("unknown operation [" + operation + "]");
            }
        } else {
            throw new IllegalArgumentException("unknown mapped field type [" + mappedFieldType + "]");
        }
    }, null);
    // --- Phase 1: doc-only field, get() first. get() fills only docFactoryCache;
    // the later getScriptField() adds the same docFactory to fieldFactoryCache.
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertTrue(leafDocLookup.fieldFactoryCache.isEmpty());
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    // Repeated accesses in both orders must hit the caches without growing them.
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    leafDocLookup.docFactoryCache.clear();
    leafDocLookup.fieldFactoryCache.clear();
    // --- Phase 2: doc-only field, getScriptField() first. The field cache fills
    // first; the doc cache stays empty until get() is called.
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertTrue(leafDocLookup.docFactoryCache.isEmpty());
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    leafDocLookup.docFactoryCache.clear();
    leafDocLookup.fieldFactoryCache.clear();
    // --- Phase 3: source-only field. getScriptField() works (SCRIPT path) but
    // get() throws (SEARCH path), so only the field cache is ever populated.
    assertEquals(fieldSourceValues, leafDocLookup.getScriptField(nameSource));
    expectThrows(IllegalArgumentException.class, () -> leafDocLookup.get(nameSource));
    assertTrue(leafDocLookup.docFactoryCache.isEmpty());
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(sourceFactory, leafDocLookup.fieldFactoryCache.get(nameSource).factory);
    leafDocLookup.docFactoryCache.clear();
    leafDocLookup.fieldFactoryCache.clear();
    // --- Phase 4: doc-and-source field, get() first. The two caches hold
    // DIFFERENT factories for the same name (doc-backed vs source-backed).
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertTrue(leafDocLookup.fieldFactoryCache.isEmpty());
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    leafDocLookup.docFactoryCache.clear();
    leafDocLookup.fieldFactoryCache.clear();
    // --- Phase 5: doc-and-source field, getScriptField() first (mirror of phase 4).
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertTrue(leafDocLookup.docFactoryCache.isEmpty());
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(1, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(1, leafDocLookup.docFactoryCache.size());
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    leafDocLookup.docFactoryCache.clear();
    leafDocLookup.fieldFactoryCache.clear();
    // --- Phase 6: all three fields interleaved. Expected steady state:
    // docFactoryCache has 2 entries (doc, docAndSource — source can't be fetched
    // via get()), fieldFactoryCache has 3 (doc, source, docAndSource).
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(fieldSourceValues, leafDocLookup.getScriptField(nameSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(2, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(3, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(sourceFactory, leafDocLookup.fieldFactoryCache.get(nameSource).factory);
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(fieldSourceValues, leafDocLookup.getScriptField(nameSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(3, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(sourceFactory, leafDocLookup.fieldFactoryCache.get(nameSource).factory);
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(2, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(fieldSourceValues, leafDocLookup.getScriptField(nameSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(2, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(3, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(sourceFactory, leafDocLookup.fieldFactoryCache.get(nameSource).factory);
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    leafDocLookup.docFactoryCache.clear();
    leafDocLookup.fieldFactoryCache.clear();
    // --- Phase 7: interleaved again after a clear, getScriptField() first.
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(fieldSourceValues, leafDocLookup.getScriptField(nameSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(3, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(sourceFactory, leafDocLookup.fieldFactoryCache.get(nameSource).factory);
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(2, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(fieldSourceValues, leafDocLookup.getScriptField(nameSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(2, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
    assertEquals(3, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(sourceFactory, leafDocLookup.fieldFactoryCache.get(nameSource).factory);
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(fieldDocValues, leafDocLookup.getScriptField(nameDoc));
    assertEquals(fieldSourceValues, leafDocLookup.getScriptField(nameSource));
    assertEquals(fieldSourceAndDocValues, leafDocLookup.getScriptField(nameDocAndSource));
    assertEquals(docDocValues, leafDocLookup.get(nameDoc));
    assertEquals(docSourceAndDocValues, leafDocLookup.get(nameDocAndSource));
    assertEquals(3, leafDocLookup.fieldFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.fieldFactoryCache.get(nameDoc).factory);
    assertEquals(sourceFactory, leafDocLookup.fieldFactoryCache.get(nameSource).factory);
    assertEquals(docAndSourceSourceFactory, leafDocLookup.fieldFactoryCache.get(nameDocAndSource).factory);
    assertEquals(2, leafDocLookup.docFactoryCache.size());
    assertEquals(docFactory, leafDocLookup.docFactoryCache.get(nameDoc).factory);
    assertEquals(docAndSourceDocFactory, leafDocLookup.docFactoryCache.get(nameDocAndSource).factory);
}
187264.941298elasticsearch
 /**
  * Builds the reserved {@code RoleDescriptor} for the Kibana system user under the
  * given role {@code name}. The descriptor bundles, in one expression:
  * cluster privileges (monitoring, template/token/profile management, ML, etc.),
  * a long list of index privileges (Kibana/reporting/monitoring internals, APM,
  * Fleet, endpoint/osquery/cloud-security data streams, sample data, SLO and
  * connector indices), configurable cluster privileges for {@code kibana-*}
  * application privileges and {@code kibana*} profile data, remote-indices read
  * privileges for monitoring/APM patterns, and a human-readable description.
  *
  * Because every element here is a security-sensitive runtime string (index
  * patterns and privilege names), do not reformat or "normalize" them — a
  * one-character change alters what the role grants.
  *
  * NOTE(review): the pattern ".fleet-fileds*" looks like a typo of ".fleet-files*"
  * (both ".fleet-files-*" and ".fleet-file-data-*" also appear) — confirm whether
  * it is intentional / kept for backwards compatibility before changing it.
  *
  * @param name the role name to register this descriptor under
  * @return the fully populated reserved role descriptor for the Kibana system user
  */
 static RoleDescriptor kibanaSystem(String name) {
    return new RoleDescriptor(name, new String[] { "monitor", "manage_index_templates", MonitoringBulkAction.NAME, "manage_saml", "manage_token", "manage_oidc", "manage_enrich", "manage_pipeline", "manage_ilm", "manage_transform", InvalidateApiKeyAction.NAME, "grant_api_key", "manage_own_api_key", GetBuiltinPrivilegesAction.NAME, "delegate_pki", GetProfilesAction.NAME, ActivateProfileAction.NAME, SuggestProfilesAction.NAME, ProfileHasPrivilegesAction.NAME, "write_fleet_secrets", "manage_ml", "cluster:admin/analyze", "monitor_text_structure", "cancel_task" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*", ".reporting-*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".management-beats").privileges("create_index", "read", "write").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".ml-anomalies*", ".ml-stats-*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".ml-annotations*", ".ml-notifications*").privileges("read", "write").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".apm-agent-configuration").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".apm-custom-link").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".apm-source-map").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices("apm-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-apm.*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-apm.*").privileges("read", "read_cross_cluster").build(), 
RoleDescriptor.IndicesPrivileges.builder().indices("traces-apm.*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices("traces-apm-*").privileges("read", "read_cross_cluster").build(), RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("view_index_metadata", "monitor").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.diagnostic.collection-*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-secrets*").privileges("write", "delete", "create_index").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-actions*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-agents*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-artifacts*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-enrollment-api-keys*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-policies*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-policies-leader*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-servers*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-fileds*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-file-data-*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-files-*").privileges("all").allowRestrictedIndices(true).build(), 
RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-filedelivery-data-*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices(".fleet-filedelivery-meta-*").privileges("all").allowRestrictedIndices(true).build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-elastic_agent*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-fleet_server*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-fleet_server*").privileges("read", "delete_index").build(), RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_LEGACY_INDEX).privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.LISTS_INDEX, ReservedRolesStore.LISTS_ITEMS_INDEX).privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_BACKING_INDEX).privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_INDEX_ALIAS).privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.PREVIEW_ALERTS_INDEX_ALIAS).privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_ALIAS).privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-endpoint.policy-*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-endpoint.metrics-*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-endpoint.events.*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-*", "synthetics-*", "traces-*", "/metrics-.*&~(metrics-endpoint\\.metadata_current_default.*)/", ".logs-endpoint.action.responses-*", ".logs-endpoint.diagnostic.collection-*", ".logs-endpoint.actions-*", ".logs-endpoint.heartbeat-*", 
".logs-osquery_manager.actions-*", ".logs-osquery_manager.action.responses-*", "profiling-*").privileges(TransportUpdateSettingsAction.TYPE.name(), TransportPutMappingAction.TYPE.name(), RolloverAction.NAME, "indices:admin/data_stream/lifecycle/put").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.action.responses-*").privileges("auto_configure", "read", "write").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.actions-*").privileges("auto_configure", "read", "write").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".logs-osquery_manager.action.responses-*").privileges("auto_configure", "create_index", "read", "index", "delete").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".logs-osquery_manager.actions-*").privileges("auto_configure", "create_index", "read", "index", "write", "delete").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-sentinel_one.*", "logs-crowdstrike.*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.diagnostic.collection-*", "logs-apm-*", "logs-apm.*-*", "metrics-apm-*", "metrics-apm.*-*", "traces-apm-*", "traces-apm.*-*", "synthetics-http-*", "synthetics-icmp-*", "synthetics-tcp-*", "synthetics-browser-*", "synthetics-browser.network-*", "synthetics-browser.screenshot-*").privileges(TransportDeleteIndexAction.TYPE.name()).build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-endpoint.metadata*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("metrics-endpoint.metadata_current_default*", ".metrics-endpoint.metadata_current_default*", ".metrics-endpoint.metadata_united_default*").privileges("create_index", "delete_index", "read", "index", TransportIndicesAliasesAction.NAME, TransportUpdateSettingsAction.TYPE.name()).build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-ti_*_latest.*").privileges("create_index", 
"delete_index", "read", "index", "delete", "manage", TransportIndicesAliasesAction.NAME, TransportUpdateSettingsAction.TYPE.name()).build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-ti_*.*-*").privileges(TransportDeleteIndexAction.TYPE.name(), "read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("kibana_sample_data_*").privileges("create_index", "delete_index", "read", "index", "view_index_metadata", TransportIndicesAliasesAction.NAME, TransportUpdateSettingsAction.TYPE.name()).build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-cloud_security_posture.findings-*", "logs-cloud_security_posture.vulnerabilities-*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-cloud_security_posture.findings_latest-default*", "logs-cloud_security_posture.scores-default*", "logs-cloud_security_posture.vulnerabilities_latest-default*").privileges("create_index", "read", "index", "delete", TransportIndicesAliasesAction.NAME, TransportUpdateSettingsAction.TYPE.name()).build(), RoleDescriptor.IndicesPrivileges.builder().indices("risk-score.risk-*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".asset-criticality.asset-criticality-*").privileges("create_index", "manage", "read").build(), RoleDescriptor.IndicesPrivileges.builder().indices("logs-cloud_defend.*", "metrics-cloud_defend.*").privileges("read", "view_index_metadata").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".slo-observability.*").privileges("all").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read").build(), RoleDescriptor.IndicesPrivileges.builder().indices(".elastic-connectors*").privileges("read").build() }, null, new ConfigurableClusterPrivilege[] { new ConfigurableClusterPrivileges.ManageApplicationPrivileges(Set.of("kibana-*")), new 
ConfigurableClusterPrivileges.WriteProfileDataPrivileges(Set.of("kibana*")) }, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null, new RoleDescriptor.RemoteIndicesPrivileges[] { getRemoteIndicesReadPrivileges(".monitoring-*"), getRemoteIndicesReadPrivileges("apm-*"), getRemoteIndicesReadPrivileges("logs-apm.*"), getRemoteIndicesReadPrivileges("metrics-apm.*"), getRemoteIndicesReadPrivileges("traces-apm.*"), getRemoteIndicesReadPrivileges("traces-apm-*") }, null, null, "Grants access necessary for the Kibana system user to read from and write to the Kibana indices, " + "manage index templates and tokens, and check the availability of the Elasticsearch cluster. " + "It also permits activating, searching, and retrieving user profiles, " + "as well as updating user profile data for the kibana-* namespace. " + "Additionally, this role grants read access to the .monitoring-* indices " + "and read and write access to the .reporting-* indices. " + "Note: This role should not be assigned to users as the granted permissions may change between releases.");
}
185465.551330elasticsearch
public void testDetectReasonToRebalanceModels() {
    String model1 = "model-1";
    String model2 = "model-2";
    String mlNode1 = "ml-node-with-room";
    String mlNode2 = "new-ml-node-with-room";
    DiscoveryNode mlNode1Node = buildNode(mlNode1, true, ByteSizeValue.ofGb(4).getBytes(), 8);
    DiscoveryNode mlNode2Node = buildNode(mlNode2, true, ByteSizeValue.ofGb(4).getBytes(), 8);
    ClusterState stateWithTwoNodes = ClusterState.builder(new ClusterName("testDetectReasonToRebalanceModels")).nodes(DiscoveryNodes.builder().add(mlNode1Node).add(mlNode2Node)).build();
    ClusterState stateWithOneNode = ClusterState.builder(new ClusterName("testDetectReasonToRebalanceModels")).nodes(DiscoveryNodes.builder().add(mlNode1Node)).build();
    ClusterState stateWithOneNodeNotMl = ClusterState.builder(new ClusterName("testDetectReasonToRebalanceModels")).nodes(DiscoveryNodes.builder().add(mlNode1Node).add(buildNode("not-ml-node", false, ByteSizeValue.ofGb(4).getBytes(), 8))).build();
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(randomFrom(stateWithOneNodeNotMl, stateWithOneNode, stateWithTwoNodes)).build(), ClusterState.builder(randomFrom(stateWithOneNodeNotMl, stateWithOneNode, stateWithTwoNodes)).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build())), equalTo(Optional.empty()));
    ClusterState randomState = randomFrom(stateWithOneNodeNotMl, stateWithOneNode, stateWithTwoNodes);
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(randomState).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()).build()).build(), ClusterState.builder(randomState).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()).build()).build())), equalTo(Optional.empty()));
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(stateWithOneNode).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build(), ClusterState.builder(stateWithOneNodeNotMl).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build())), equalTo(Optional.empty()));
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(stateWithOneNode).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build(), ClusterState.builder(stateWithTwoNodes).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build())), equalTo(Optional.empty()));
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(stateWithTwoNodes).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build(), ClusterState.builder(stateWithOneNode).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build())), equalTo(Optional.of("nodes changed")));
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(stateWithTwoNodes).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100)).stopAssignment("test")).build()).build()).build(), ClusterState.builder(stateWithOneNode).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build())), equalTo(Optional.empty()));
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(stateWithTwoNodes).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode2)).build()).build(), ClusterState.builder(stateWithOneNode).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100))).build()).build()).build())), equalTo(Optional.empty()));
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(stateWithOneNode).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).addNewAssignment(model2, TrainedModelAssignment.Builder.empty(newParams("model-2", 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")).addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).build()).build()).build(), ClusterState.builder(stateWithTwoNodes).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).addNewAssignment(model2, TrainedModelAssignment.Builder.empty(newParams("model-2", 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")).addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).build()).build()).build())), equalTo(Optional.of("nodes changed")));
    assertThat(TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", ClusterState.builder(stateWithOneNode).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).addNewAssignment(model2, TrainedModelAssignment.Builder.empty(newParams("model-2", 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")).addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, "")).stopAssignment("test")).build()).build()).build(), ClusterState.builder(stateWithTwoNodes).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).addNewAssignment(model2, TrainedModelAssignment.Builder.empty(newParams("model-2", 100)).addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")).addRoutingEntry(mlNode2, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).build()).build()).build())), equalTo(Optional.empty()));
}
18674.5147348hadoop
/**
 * Dispatches an HTTP GET request to the matching {@code FSOperations} command and
 * returns the JSON (or octet-stream, for OPEN) response.
 *
 * @param path the filesystem path the operation targets (made absolute below)
 * @param uriInfo request URI context, used to build redirection URLs
 * @param op the GET operation to perform (from the "op" query parameter)
 * @param params parsed query parameters for the operation
 * @param request the servlet request (used for audit logging of the caller address)
 * @return the HTTP response for the operation
 * @throws IOException on filesystem errors, on an invalid GET operation, or if an
 *         OPEN is interrupted before a stream could be obtained
 * @throws FileSystemAccessException on errors accessing the backing filesystem
 */
public Response get(@PathParam("path") String path, @Context UriInfo uriInfo, @QueryParam(OperationParam.NAME) OperationParam op, @Context Parameters params, @Context HttpServletRequest request) throws IOException, FileSystemAccessException {
    // In WRITEONLY access mode only the two read operations needed for basic
    // liveness (GETFILESTATUS/LISTSTATUS) are allowed; everything else is rejected.
    if ((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) && (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) && accessMode == AccessMode.WRITEONLY) {
        return Response.status(Response.Status.FORBIDDEN).build();
    }
    UserGroupInformation user = HttpUserGroupInformation.get();
    Response response;
    path = makeAbsolute(path);
    // Tag log context with the operation and caller address for audit correlation.
    MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
    MDC.put("hostname", request.getRemoteAddr());
    switch(op.value()) {
        case OPEN:
            {
                Boolean noRedirect = params.get(NoRedirectParam.NAME, NoRedirectParam.class);
                if (noRedirect) {
                    // Client asked for the data-node location instead of the data itself.
                    URI redirectURL = createOpenRedirectionURL(uriInfo);
                    final String js = JsonUtil.toJsonString("Location", redirectURL);
                    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                } else {
                    final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
                    final FileSystem fs = createFileSystem(user);
                    InputStream is = null;
                    UserGroupInformation ugi = UserGroupInformation.createProxyUser(user.getShortUserName(), UserGroupInformation.getLoginUser());
                    try {
                        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {

                            @Override
                            public InputStream run() throws Exception {
                                return command.execute(fs);
                            }
                        });
                    } catch (InterruptedException ie) {
                        LOG.warn("Open interrupted.", ie);
                        Thread.currentThread().interrupt();
                        // FIX: previously execution fell through with is == null and a
                        // broken 200 response was built around a null stream. Fail the
                        // request explicitly instead, after restoring the interrupt flag.
                        throw new IOException("Open interrupted for [" + path + "]", ie);
                    }
                    Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
                    Long len = params.get(LenParam.NAME, LenParam.class);
                    AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[] { path, offset, len });
                    InputStreamEntity entity = new InputStreamEntity(is, offset, len);
                    response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
                }
                break;
            }
        case GETFILESTATUS:
            {
                FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case LISTSTATUS:
            {
                String filter = params.get(FilterParam.NAME, FilterParam.class);
                FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter);
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETHOMEDIRECTORY:
            {
                // Only meaningful on the root path; reject anything else.
                enforceRootPath(op.value(), path);
                FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
                JSONObject json = fsExecute(user, command);
                AUDIT_LOG.info("Home Directory for [{}]", user);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case INSTRUMENTATION:
            {
                enforceRootPath(op.value(), path);
                // Instrumentation snapshots are restricted to the configured admin group.
                Groups groups = HttpFSServerWebApp.get().get(Groups.class);
                Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
                if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
                    throw new AccessControlException("User not in HttpFSServer admin group");
                }
                Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
                Map snapshot = instrumentation.getSnapshot();
                response = Response.ok(snapshot).build();
                break;
            }
        case GETCONTENTSUMMARY:
            {
                FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path);
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("Content summary for [{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETQUOTAUSAGE:
            {
                FSOperations.FSQuotaUsage command = new FSOperations.FSQuotaUsage(path);
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("Quota Usage for [{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETFILECHECKSUM:
            {
                FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path);
                Boolean noRedirect = params.get(NoRedirectParam.NAME, NoRedirectParam.class);
                AUDIT_LOG.info("[{}]", path);
                if (noRedirect) {
                    URI redirectURL = createOpenRedirectionURL(uriInfo);
                    final String js = JsonUtil.toJsonString("Location", redirectURL);
                    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                } else {
                    Map json = fsExecute(user, command);
                    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                }
                break;
            }
        case GETFILEBLOCKLOCATIONS:
            {
                // Missing or non-positive offset/len fall back to "whole file".
                long offset = 0;
                long len = Long.MAX_VALUE;
                Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
                Long lenParam = params.get(LenParam.NAME, LenParam.class);
                AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
                if (offsetParam != null && offsetParam > 0) {
                    offset = offsetParam;
                }
                if (lenParam != null && lenParam > 0) {
                    len = lenParam;
                }
                FSOperations.FSFileBlockLocations command = new FSOperations.FSFileBlockLocations(path, offset, len);
                @SuppressWarnings("rawtypes")
                Map locations = fsExecute(user, command);
                final String json = JsonUtil.toJsonString("BlockLocations", locations);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETACLSTATUS:
            {
                FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("ACL status for [{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETXATTRS:
            {
                List<String> xattrNames = params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
                XAttrCodec encoding = params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
                FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
                @SuppressWarnings("rawtypes")
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("XAttrs for [{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case LISTXATTRS:
            {
                FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
                @SuppressWarnings("rawtypes")
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("XAttr names for [{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case LISTSTATUS_BATCH:
            {
                // "startAfter" is the pagination token; empty token starts from the beginning.
                String startAfter = params.get(HttpFSParametersProvider.StartAfterParam.NAME, HttpFSParametersProvider.StartAfterParam.class);
                byte[] token = HttpFSUtils.EMPTY_BYTES;
                if (startAfter != null) {
                    token = startAfter.getBytes(StandardCharsets.UTF_8);
                }
                FSOperations.FSListStatusBatch command = new FSOperations.FSListStatusBatch(path, token);
                @SuppressWarnings("rawtypes")
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("[{}] token [{}]", path, token);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETTRASHROOT:
            {
                FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
                JSONObject json = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETALLSTORAGEPOLICY:
            {
                FSOperations.FSGetAllStoragePolicies command = new FSOperations.FSGetAllStoragePolicies();
                JSONObject json = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETSTORAGEPOLICY:
            {
                FSOperations.FSGetStoragePolicy command = new FSOperations.FSGetStoragePolicy(path);
                JSONObject json = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETSNAPSHOTDIFF:
            {
                String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, OldSnapshotNameParam.class);
                String snapshotName = params.get(SnapshotNameParam.NAME, SnapshotNameParam.class);
                FSOperations.FSGetSnapshotDiff command = new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName, snapshotName);
                String js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETSNAPSHOTDIFFLISTING:
            {
                String oldSnapshotName = params.get(OldSnapshotNameParam.NAME, OldSnapshotNameParam.class);
                String snapshotName = params.get(SnapshotNameParam.NAME, SnapshotNameParam.class);
                String snapshotDiffStartPath = params.get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME, HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
                Integer snapshotDiffIndex = params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME, HttpFSParametersProvider.SnapshotDiffIndexParam.class);
                FSOperations.FSGetSnapshotDiffListing command = new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName, snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
                String js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETSNAPSHOTTABLEDIRECTORYLIST:
            {
                FSOperations.FSGetSnapshottableDirListing command = new FSOperations.FSGetSnapshottableDirListing();
                String js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", "/");
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETSNAPSHOTLIST:
            {
                FSOperations.FSGetSnapshotListing command = new FSOperations.FSGetSnapshotListing(path);
                String js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", "/");
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETSERVERDEFAULTS:
            {
                FSOperations.FSGetServerDefaults command = new FSOperations.FSGetServerDefaults();
                String js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", "/");
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case CHECKACCESS:
            {
                String mode = params.get(FsActionParam.NAME, FsActionParam.class);
                FsActionParam fsparam = new FsActionParam(mode);
                FSOperations.FSAccess command = new FSOperations.FSAccess(path, FsAction.getFsAction(fsparam.value()));
                // Access check succeeds by not throwing; an empty 200 is the contract.
                fsExecute(user, command);
                AUDIT_LOG.info("[{}]", "/");
                response = Response.ok().build();
                break;
            }
        case GETECPOLICY:
            {
                FSOperations.FSGetErasureCodingPolicy command = new FSOperations.FSGetErasureCodingPolicy(path);
                String js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETECPOLICIES:
            {
                FSOperations.FSGetErasureCodingPolicies command = new FSOperations.FSGetErasureCodingPolicies();
                String js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETECCODECS:
            {
                FSOperations.FSGetErasureCodingCodecs command = new FSOperations.FSGetErasureCodingCodecs();
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GET_BLOCK_LOCATIONS:
            {
                // Legacy variant of GETFILEBLOCKLOCATIONS: same offset/len defaulting,
                // but serialized under the "LocatedBlocks" key.
                long offset = 0;
                long len = Long.MAX_VALUE;
                Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
                Long lenParam = params.get(LenParam.NAME, LenParam.class);
                AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
                if (offsetParam != null && offsetParam > 0) {
                    offset = offsetParam;
                }
                if (lenParam != null && lenParam > 0) {
                    len = lenParam;
                }
                FSOperations.FSFileBlockLocationsLegacy command = new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
                @SuppressWarnings("rawtypes")
                Map locations = fsExecute(user, command);
                final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETFILELINKSTATUS:
            {
                FSOperations.FSFileLinkStatus command = new FSOperations.FSFileLinkStatus(path);
                @SuppressWarnings("rawtypes")
                Map js = fsExecute(user, command);
                AUDIT_LOG.info("[{}]", path);
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETSTATUS:
            {
                FSOperations.FSStatus command = new FSOperations.FSStatus(path);
                @SuppressWarnings("rawtypes")
                Map js = fsExecute(user, command);
                response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        case GETTRASHROOTS:
            {
                Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
                FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers);
                Map json = fsExecute(user, command);
                AUDIT_LOG.info("allUsers [{}]", allUsers);
                response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
                break;
            }
        default:
            {
                throw new IOException(MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
            }
    }
    return response;
}
184424.0237214hadoop
/**
 * Converts a timeline {@link TimelineEntity} into an {@code ApplicationReportExt}.
 *
 * Report fields are assembled from two sources: the entity's "other info" map
 * (static attributes such as user, queue, name, tags, resource metrics) and the
 * entity's event list (lifecycle timestamps, state transitions, final status).
 *
 * @param entity the timeline entity describing the application
 * @param field which fields the caller needs; {@code USER_AND_ACLS} short-circuits
 *        after user and view-ACLs are extracted, skipping the rest of the parse
 * @return the assembled report plus the application's view ACLs
 */
private static ApplicationReportExt convertToApplicationReport(TimelineEntity entity, ApplicationReportField field) {
    String user = null;
    String queue = null;
    String name = null;
    String type = null;
    boolean unmanagedApplication = false;
    long createdTime = 0;
    long launchTime = 0;
    long submittedTime = 0;
    long finishedTime = 0;
    float progress = 0.0f;
    int applicationPriority = 0;
    ApplicationAttemptId latestApplicationAttemptId = null;
    String diagnosticsInfo = null;
    FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
    YarnApplicationState state = YarnApplicationState.ACCEPTED;
    ApplicationResourceUsageReport appResources = null;
    Set<String> appTags = null;
    Map<ApplicationAccessType, String> appViewACLs = new HashMap<ApplicationAccessType, String>();
    String appNodeLabelExpression = null;
    String amNodeLabelExpression = null;
    Map<String, Object> entityInfo = entity.getOtherInfo();
    if (entityInfo != null) {
        if (entityInfo.containsKey(ApplicationMetricsConstants.USER_ENTITY_INFO)) {
            user = entityInfo.get(ApplicationMetricsConstants.USER_ENTITY_INFO).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO)) {
            String appViewACLsStr = entityInfo.get(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO).toString();
            if (appViewACLsStr.length() > 0) {
                appViewACLs.put(ApplicationAccessType.VIEW_APP, appViewACLsStr);
            }
        }
        // Fast path: the caller only needs user + view ACLs, so skip the full parse.
        if (field == ApplicationReportField.USER_AND_ACLS) {
            return new ApplicationReportExt(ApplicationReport.newInstance(ApplicationId.fromString(entity.getEntityId()), latestApplicationAttemptId, user, queue, name, null, -1, null, state, diagnosticsInfo, null, createdTime, submittedTime, 0, finishedTime, finalStatus, null, null, progress, type, null, appTags, unmanagedApplication, Priority.newInstance(applicationPriority), appNodeLabelExpression, amNodeLabelExpression), appViewACLs);
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
            queue = entityInfo.get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.NAME_ENTITY_INFO)) {
            name = entityInfo.get(ApplicationMetricsConstants.NAME_ENTITY_INFO).toString();
        }
        // FIX: this lookup appeared twice verbatim; the redundant copy was removed.
        if (entityInfo.containsKey(ApplicationMetricsConstants.TYPE_ENTITY_INFO)) {
            type = entityInfo.get(ApplicationMetricsConstants.TYPE_ENTITY_INFO).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO)) {
            unmanagedApplication = Boolean.parseBoolean(entityInfo.get(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO).toString());
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO)) {
            applicationPriority = Integer.parseInt(entityInfo.get(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO).toString());
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION)) {
            appNodeLabelExpression = entityInfo.get(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION)) {
            amNodeLabelExpression = entityInfo.get(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION).toString();
        }
        submittedTime = parseLong(entityInfo, ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO);
        if (entityInfo.containsKey(ApplicationMetricsConstants.APP_CPU_METRICS)) {
            // Presence of the CPU metric is treated as "resource metrics available";
            // the sibling metrics are then read with parseLong (missing -> 0).
            long vcoreSeconds = parseLong(entityInfo, ApplicationMetricsConstants.APP_CPU_METRICS);
            long memorySeconds = parseLong(entityInfo, ApplicationMetricsConstants.APP_MEM_METRICS);
            long preemptedMemorySeconds = parseLong(entityInfo, ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS);
            long preemptedVcoreSeconds = parseLong(entityInfo, ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS);
            Map<String, Long> resourceSecondsMap = new HashMap<>();
            Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
            resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
            resourceSecondsMap.put(ResourceInformation.VCORES.getName(), vcoreSeconds);
            preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), preemptedMemorySeconds);
            preemptedResourceSecondsMap.put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds);
            appResources = ApplicationResourceUsageReport.newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0, preemptedResourceSecondsMap);
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
            appTags = new HashSet<String>();
            Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
            if (obj != null && obj instanceof Collection<?>) {
                for (Object o : (Collection<?>) obj) {
                    if (o != null) {
                        appTags.add(o.toString());
                    }
                }
            }
        }
    }
    List<TimelineEvent> events = entity.getEvents();
    long updatedTimeStamp = 0L;
    if (events != null) {
        for (TimelineEvent event : events) {
            if (event.getEventType().equals(ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
                createdTime = event.getTimestamp();
            } else if (event.getEventType().equals(ApplicationMetricsConstants.LAUNCHED_EVENT_TYPE)) {
                launchTime = event.getTimestamp();
            } else if (event.getEventType().equals(ApplicationMetricsConstants.UPDATED_EVENT_TYPE)) {
                // Only the most recent UPDATED event wins; older ones are skipped.
                if (event.getTimestamp() > updatedTimeStamp) {
                    updatedTimeStamp = event.getTimestamp();
                } else {
                    continue;
                }
                Map<String, Object> eventInfo = event.getEventInfo();
                if (eventInfo == null) {
                    continue;
                }
                // NOTE(review): these two lookups assume both keys are always present
                // on UPDATED events; a missing key would NPE here — confirm publisher.
                applicationPriority = Integer.parseInt(eventInfo.get(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO).toString());
                queue = eventInfo.get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO).toString();
            } else if (event.getEventType().equals(ApplicationMetricsConstants.STATE_UPDATED_EVENT_TYPE)) {
                Map<String, Object> eventInfo = event.getEventInfo();
                if (eventInfo == null) {
                    continue;
                }
                if (eventInfo.containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) {
                    // Once a final state is recorded, later state updates are ignored.
                    if (!Apps.isApplicationFinalState(state)) {
                        state = YarnApplicationState.valueOf(eventInfo.get(ApplicationMetricsConstants.STATE_EVENT_INFO).toString());
                    }
                }
            } else if (event.getEventType().equals(ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
                progress = 1.0F;
                finishedTime = event.getTimestamp();
                Map<String, Object> eventInfo = event.getEventInfo();
                if (eventInfo == null) {
                    continue;
                }
                if (eventInfo.containsKey(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO)) {
                    latestApplicationAttemptId = ApplicationAttemptId.fromString(eventInfo.get(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO).toString());
                }
                if (eventInfo.containsKey(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) {
                    diagnosticsInfo = eventInfo.get(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO).toString();
                }
                if (eventInfo.containsKey(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO)) {
                    finalStatus = FinalApplicationStatus.valueOf(eventInfo.get(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO).toString());
                }
                if (eventInfo.containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) {
                    state = YarnApplicationState.valueOf(eventInfo.get(ApplicationMetricsConstants.STATE_EVENT_INFO).toString());
                }
            }
        }
    }
    return new ApplicationReportExt(ApplicationReport.newInstance(ApplicationId.fromString(entity.getEntityId()), latestApplicationAttemptId, user, queue, name, null, -1, null, state, diagnosticsInfo, null, createdTime, submittedTime, launchTime, finishedTime, finalStatus, appResources, null, progress, type, null, appTags, unmanagedApplication, Priority.newInstance(applicationPriority), appNodeLabelExpression, amNodeLabelExpression), appViewACLs);
}
185121.7311298hadoop
 /**
  * Verifies that {@code TimelineReaderWebServicesUtils.parseMetricFilters}
  * turns metric filter expressions into the expected {@link TimelineFilterList}
  * trees. Covers: bracket nesting, AND binding tighter than OR at the same
  * bracket level, the "ene" (not-equal, key-must-exist) vs "ne" variants,
  * whitespace tolerance, and rejection of malformed expressions
  * (unbalanced brackets, non-numeric values, bad compare/logical ops).
  */
 void testMetricFiltersParsing() throws Exception {
    // Fully bracketed expression: tree shape follows the brackets exactly.
    String expr = "(((key11 ne 234 AND key12 gt 23) AND " + "(key13 lt 34 OR key14 ge 567)) OR (key21 lt 24 OR key22 le 45))";
    TimelineFilterList expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(Operator.AND, new TimelineFilterList(Operator.AND, new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "key11", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "key12", 23, true)), new TimelineFilterList(Operator.OR, new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key13", 34, true), new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "key14", 567, true))), new TimelineFilterList(Operator.OR, new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key21", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "key22", 45, true)));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    // "ene" => NOT_EQUAL with keyMustExist=true; plain "ne" => keyMustExist=false.
    expr = "abc ene 234";
    expectedList = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, true));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    expr = "abc ne 234";
    expectedList = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, false));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    expr = "abc ne 234 AND def gt 23";
    expectedList = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "def", 23, true));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    // Surrounding brackets must not change the resulting tree.
    expr = "(abc ne 234 AND def gt 23)";
    expectedList = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "def", 23, true));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    // Mixed AND/OR without brackets: the AND pair groups first, then ORs chain.
    expr = "abc ne 234 AND def gt 23 OR rst lt 24";
    expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "def", 23, true)), new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "rst", 24, true));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    expr = "abc ne 234 AND def gt 23 OR rst lt 24 OR xyz le 456";
    expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "def", 23, true)), new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "rst", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "xyz", 456, true));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    // A trailing AND wraps everything parsed so far as its left operand.
    expr = "abc ne 234 AND def gt 23 OR rst lt 24 OR xyz le 456 AND pqr ge 2";
    expectedList = new TimelineFilterList(new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "def", 23, true)), new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "rst", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "xyz", 456, true)), new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "pqr", 2, true));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    // Same expression with arbitrary extra whitespace parses identically.
    expr = "  abc ne   234       AND       def           gt 23 OR     rst lt " + "           24     OR xyz     le     456    AND pqr ge 2        ";
    expectedList = new TimelineFilterList(new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "abc", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "def", 23, true)), new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "rst", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "xyz", 456, true)), new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "pqr", 2, true));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    // Deeply nested combination of all the above behaviors.
    expr = "(((key11 ne 234 AND key12 gt 23 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45))";
    expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(Operator.AND, new TimelineFilterList(new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "key11", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "key12", 23, true)), new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key13", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "key14", 456, true)), new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "key15", 2, true)), new TimelineFilterList(Operator.OR, new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key16", 34, true), new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "key17", 567, true))), new TimelineFilterList(Operator.OR, new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key21", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "key22", 45, true)));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    expr = "   (  (     (        key11      ne     234    AND key12    gt   " + "23    OR    key13    lt    24 OR key14 le 456   AND   key15   ge   2" + "   )   AND ( key16 lt 34 OR key17 ge 567 )    )     OR " + "(   key21 lt 24 OR key22 le 45 )   )    ";
    expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(Operator.AND, new TimelineFilterList(new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "key11", 234, false), new TimelineCompareFilter(TimelineCompareOp.GREATER_THAN, "key12", 23, true)), new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key13", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "key14", 456, true)), new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "key15", 2, true)), new TimelineFilterList(Operator.OR, new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key16", 34, true), new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "key17", 567, true))), new TimelineFilterList(Operator.OR, new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "key21", 24, true), new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "key22", 45, true)));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseMetricFilters(expr), expectedList);
    // Error cases below: each malformed expression must raise TimelineParseException.
    expr = "(((key11 ne 234 AND key12 gt 23 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45)";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Improper brackets. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(((key11 ne 234 AND key12 gt v3 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45))";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Non Numeric value. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(((key11 ne (234 AND key12 gt 3 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45))";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Unexpected opening bracket. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(((k)ey11 ne 234 AND key12 gt 3 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45))";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Unexpected closing bracket. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(((key11 rs 234 AND key12 gt 3 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45))";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Improper compare op. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(((key11 ne 234 PI key12 gt 3 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45))";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Improper op. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    // NOTE(review): this case is a byte-for-byte duplicate of the previous one;
    // kept to preserve behavior, but it can likely be removed.
    expr = "(((key11 ne 234 PI key12 gt 3 OR key13 lt 24 OR key14 le 456 " + "AND key15 ge 2) AND (key16 lt 34 OR key17 ge 567)) OR (key21 lt 24 " + "OR key22 le 45))";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Improper op. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(key11 ne 234 AND key12 gt 3)) OR (key13 lt 24 OR key14 le 456)";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Unbalanced brackets. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(key11 rne 234 AND key12 gt 3) OR (key13 lt 24 OR key14 le 456)";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Invalid compareop. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    expr = "(key11 ne 234 AND key12 gt 3) OR (key13 lt 24 OR key14 le";
    try {
        TimelineReaderWebServicesUtils.parseMetricFilters(expr);
        fail("Compareop cant be parsed. Exception should have been thrown.");
    } catch (TimelineParseException e) {
    }
    // Null and blank expressions yield no filter list at all.
    assertNull(TimelineReaderWebServicesUtils.parseMetricFilters(null));
    assertNull(TimelineReaderWebServicesUtils.parseMetricFilters("   "));
}
189576.193271hadoop
/**
 * Populates the HBase test store with a fixed set of timeline entities used
 * by the reader tests: four YARN application entities spread across two
 * flows / three run ids, two "type1" sub-application entities with rich
 * configs/info/metrics/events/relations, and ten generated "entitytype"
 * sub-application entities with descending id prefixes.
 *
 * @throws Exception if writing to the HBase timeline writer fails.
 */
private static void loadData() throws Exception {
    String cluster = "cluster1";
    String user = "user1";
    String flow = "flow_name";
    String flowVersion = "CF7022C10F1354";
    Long runid = 1002345678919L;
    Long runid1 = 1002345678920L;
    // --- Application entity 1: three TIME_SERIES metrics plus created/finished events.
    TimelineEntities te = new TimelineEntities();
    TimelineEntity entity = new TimelineEntity();
    String id = "application_1111111111_1111";
    String type = TimelineEntityType.YARN_APPLICATION.toString();
    entity.setId(id);
    entity.setType(type);
    Long cTime = 1425016501000L;
    entity.setCreatedTime(cTime);
    entity.addConfig("cfg2", "value1");
    Set<TimelineMetric> metrics = new HashSet<>();
    TimelineMetric m1 = new TimelineMetric();
    m1.setId("MAP_SLOT_MILLIS");
    // Metric timestamps are keyed relative to the shared test timestamp `ts`.
    Map<Long, Number> metricValues = ImmutableMap.of(ts - 100000, (Number) 2, ts - 90000, 7, ts - 80000, 40);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    m1 = new TimelineMetric();
    m1.setId("MAP1_SLOT_MILLIS");
    metricValues = ImmutableMap.of(ts - 100000, (Number) 2, ts - 90000, 9, ts - 80000, 40);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    m1 = new TimelineMetric();
    m1.setId("HDFS_BYTES_READ");
    metricValues = ImmutableMap.of(ts - 100000, (Number) 31, ts - 80000, 57);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    entity.addMetrics(metrics);
    TimelineEvent event = new TimelineEvent();
    event.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
    event.setTimestamp(cTime);
    String expKey = "foo_event";
    Object expVal = "test";
    event.addInfo(expKey, expVal);
    entity.addEvent(event);
    TimelineEvent event11 = new TimelineEvent();
    event11.setId(ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
    Long expTs = 1425019501000L;
    event11.setTimestamp(expTs);
    entity.addEvent(event11);
    te.addEntity(entity);
    // --- Application entity 2: single metric, created event only, different config key.
    TimelineEntities te1 = new TimelineEntities();
    TimelineEntity entity1 = new TimelineEntity();
    id = "application_1111111111_2222";
    type = TimelineEntityType.YARN_APPLICATION.toString();
    entity1.setId(id);
    entity1.setType(type);
    cTime = 1425016501000L;
    entity1.setCreatedTime(cTime);
    entity1.addConfig("cfg1", "value1");
    // NOTE(review): the `metrics` set is reused across entities after clear();
    // presumably addMetrics copies the elements out — confirm before refactoring.
    metrics.clear();
    TimelineMetric m2 = new TimelineMetric();
    m2.setId("MAP_SLOT_MILLIS");
    metricValues = new HashMap<Long, Number>();
    metricValues.put(ts - 100000, 5L);
    metricValues.put(ts - 80000, 101L);
    m2.setType(Type.TIME_SERIES);
    m2.setValues(metricValues);
    metrics.add(m2);
    entity1.addMetrics(metrics);
    TimelineEvent event1 = new TimelineEvent();
    event1.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
    event1.setTimestamp(cTime);
    event1.addInfo(expKey, expVal);
    entity1.addEvent(event1);
    te1.addEntity(entity1);
    // --- Application entity 3: belongs to a second flow (flow_name2 / runid2).
    String flow2 = "flow_name2";
    String flowVersion2 = "CF7022C10F1454";
    Long runid2 = 2102356789046L;
    TimelineEntities te3 = new TimelineEntities();
    TimelineEntity entity3 = new TimelineEntity();
    id = "application_11111111111111_2223";
    entity3.setId(id);
    entity3.setType(type);
    cTime = 1425016501037L;
    entity3.setCreatedTime(cTime);
    TimelineEvent event2 = new TimelineEvent();
    event2.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
    event2.setTimestamp(cTime);
    event2.addInfo("foo_event", "test");
    entity3.addEvent(event2);
    te3.addEntity(entity3);
    // --- Application entity 4: first flow but a different run id (runid1).
    TimelineEntities te4 = new TimelineEntities();
    TimelineEntity entity4 = new TimelineEntity();
    id = "application_1111111111_2224";
    entity4.setId(id);
    entity4.setType(type);
    cTime = 1425016501034L;
    entity4.setCreatedTime(cTime);
    TimelineEvent event4 = new TimelineEvent();
    event4.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
    event4.setTimestamp(cTime);
    event4.addInfo("foo_event", "test");
    entity4.addEvent(event4);
    metrics.clear();
    m2 = new TimelineMetric();
    m2.setId("MAP_SLOT_MILLIS");
    metricValues = ImmutableMap.of(ts - 100000, (Number) 5L, ts - 80000, 101L);
    m2.setType(Type.TIME_SERIES);
    m2.setValues(metricValues);
    metrics.add(m2);
    entity4.addMetrics(metrics);
    te4.addEntity(entity4);
    // --- Sub-application entity "entity1" (type1): configs, info, metrics,
    // four events, and isRelatedTo/relatesTo relations over types 1-4.
    TimelineEntities userEntities = new TimelineEntities();
    TimelineEntity entity5 = new TimelineEntity();
    entity5.setId("entity1");
    entity5.setType("type1");
    entity5.setCreatedTime(1425016501034L);
    entity5.addConfigs(ImmutableMap.of("config_param1", "value1", "config_param2", "value2", "cfg_param1", "value3"));
    entity5.addInfo(ImmutableMap.of("info1", (Object) "cluster1", "info2", 2.0, "info3", 35000, "info4", 36000));
    metrics = new HashSet<>();
    m1 = new TimelineMetric();
    m1.setId("MAP_SLOT_MILLIS");
    metricValues = ImmutableMap.of(ts - 100000, (Number) 2, ts - 80000, 40);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    m1 = new TimelineMetric();
    m1.setId("HDFS_BYTES_READ");
    metricValues = ImmutableMap.of(ts - 100000, (Number) 31, ts - 80000, 57);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    entity5.addMetrics(metrics);
    TimelineEvent event51 = new TimelineEvent();
    event51.setId("event1");
    event51.setTimestamp(cTime);
    entity5.addEvent(event51);
    TimelineEvent event52 = new TimelineEvent();
    event52.setId("event2");
    event52.setTimestamp(cTime);
    entity5.addEvent(event52);
    TimelineEvent event53 = new TimelineEvent();
    event53.setId("event3");
    event53.setTimestamp(cTime);
    entity5.addEvent(event53);
    TimelineEvent event54 = new TimelineEvent();
    event54.setId("event4");
    event54.setTimestamp(cTime);
    entity5.addEvent(event54);
    Map<String, Set<String>> isRelatedTo1 = new HashMap<String, Set<String>>();
    isRelatedTo1.put("type2", new HashSet<>(Arrays.asList("entity21", "entity22", "entity23", "entity24")));
    isRelatedTo1.put("type4", new HashSet<>(Arrays.asList("entity41", "entity42")));
    isRelatedTo1.put("type1", new HashSet<>(Arrays.asList("entity14", "entity15")));
    isRelatedTo1.put("type3", new HashSet<>(Arrays.asList("entity31", "entity35", "entity32", "entity33")));
    entity5.addIsRelatedToEntities(isRelatedTo1);
    Map<String, Set<String>> relatesTo1 = new HashMap<String, Set<String>>();
    relatesTo1.put("type2", new HashSet<>(Arrays.asList("entity21", "entity22", "entity23", "entity24")));
    relatesTo1.put("type4", new HashSet<>(Arrays.asList("entity41", "entity42")));
    relatesTo1.put("type1", new HashSet<>(Arrays.asList("entity14", "entity15")));
    relatesTo1.put("type3", new HashSet<>(Arrays.asList("entity31", "entity35", "entity32", "entity33")));
    entity5.addRelatesToEntities(relatesTo1);
    userEntities.addEntity(new SubApplicationEntity(entity5));
    // --- Sub-application entity "entity2" (type1): overlapping but distinct
    // configs/info/metrics/events/relations to exercise filter matching.
    TimelineEntity entity6 = new TimelineEntity();
    entity6.setId("entity2");
    entity6.setType("type1");
    entity6.setCreatedTime(1425016501034L);
    entity6.addConfigs(ImmutableMap.of("cfg_param3", "value1", "configuration_param2", "value2", "config_param1", "value3"));
    entity6.addInfo(ImmutableMap.of("info1", (Object) "cluster2", "info2", 2.0, "info4", 35000));
    metrics = new HashSet<>();
    m1 = new TimelineMetric();
    m1.setId("MAP1_SLOT_MILLIS");
    metricValues = ImmutableMap.of(ts - 100000, (Number) 12, ts - 80000, 140);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    m1 = new TimelineMetric();
    m1.setId("HDFS_BYTES_READ");
    metricValues = ImmutableMap.of(ts - 100000, (Number) 78, ts - 80000, 157);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    m1 = new TimelineMetric();
    m1.setId("MAP11_SLOT_MILLIS");
    m1.setType(Type.SINGLE_VALUE);
    m1.addValue(ts - 100000, 122);
    metrics.add(m1);
    entity6.addMetrics(metrics);
    TimelineEvent event61 = new TimelineEvent();
    event61.setId("event1");
    event61.setTimestamp(cTime);
    entity6.addEvent(event61);
    TimelineEvent event62 = new TimelineEvent();
    event62.setId("event5");
    event62.setTimestamp(cTime);
    entity6.addEvent(event62);
    TimelineEvent event63 = new TimelineEvent();
    event63.setId("event3");
    event63.setTimestamp(cTime);
    entity6.addEvent(event63);
    TimelineEvent event64 = new TimelineEvent();
    event64.setId("event6");
    event64.setTimestamp(cTime);
    entity6.addEvent(event64);
    Map<String, Set<String>> isRelatedTo2 = new HashMap<String, Set<String>>();
    isRelatedTo2.put("type2", new HashSet<>(Arrays.asList("entity21", "entity22", "entity23", "entity24")));
    isRelatedTo2.put("type5", new HashSet<>(Arrays.asList("entity51", "entity52")));
    isRelatedTo2.put("type6", new HashSet<>(Arrays.asList("entity61", "entity66")));
    isRelatedTo2.put("type3", new HashSet<>(Collections.singletonList("entity31")));
    entity6.addIsRelatedToEntities(isRelatedTo2);
    Map<String, Set<String>> relatesTo2 = new HashMap<String, Set<String>>();
    relatesTo2.put("type2", new HashSet<>(Arrays.asList("entity21", "entity22", "entity23", "entity24")));
    relatesTo2.put("type5", new HashSet<>(Arrays.asList("entity51", "entity52")));
    relatesTo2.put("type6", new HashSet<>(Arrays.asList("entity61", "entity66")));
    relatesTo2.put("type3", new HashSet<>(Collections.singletonList("entity31")));
    entity6.addRelatesToEntities(relatesTo2);
    userEntities.addEntity(new SubApplicationEntity(entity6));
    // Ten generated entities: entityid-1..10 with idPrefix 10..1 (descending),
    // so prefix order is the reverse of id order.
    for (long i = 1; i <= 10; i++) {
        TimelineEntity userEntity = new TimelineEntity();
        userEntity.setType("entitytype");
        userEntity.setId("entityid-" + i);
        userEntity.setIdPrefix(11 - i);
        userEntity.setCreatedTime(ts);
        userEntities.addEntity(new SubApplicationEntity(userEntity));
    }
    // Write all entity batches under their respective collector contexts,
    // flushing once at the end; the writer is closed even on failure.
    HBaseTimelineWriterImpl hbi = null;
    Configuration c1 = getHBaseTestingUtility().getConfiguration();
    UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(doAsUser);
    try {
        hbi = new HBaseTimelineWriterImpl();
        hbi.init(c1);
        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, entity.getId()), te, remoteUser);
        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, entity1.getId()), te1, remoteUser);
        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid1, entity4.getId()), te4, remoteUser);
        hbi.write(new TimelineCollectorContext(cluster, user, flow2, flowVersion2, runid2, entity3.getId()), te3, remoteUser);
        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, "application_1111111111_1111"), userEntities, remoteUser);
        writeApplicationEntities(hbi, ts);
        hbi.flush();
    } finally {
        if (hbi != null) {
            hbi.close();
        }
    }
}
188553.916237hadoop
/**
 * Writes an application entity (with info, relations, configs, a time-series
 * metric and an aggregated SINGLE_VALUE metric) to HBase, then verifies the
 * stored row directly via the application table and again through the
 * timeline reader with varying {@code metricsLimit} settings.
 *
 * Fixes over the previous version: the HBase {@code Connection} is now
 * closed via try-with-resources (it was leaked), {@code assertNotNull} is
 * used instead of {@code assertTrue(result != null)}, and a duplicated
 * assertion on the metric value count was removed.
 */
public void testWriteApplicationToHBase() throws Exception {
    TimelineEntities te = new TimelineEntities();
    ApplicationEntity entity = new ApplicationEntity();
    String appId = "application_1000178881110_2002";
    entity.setId(appId);
    Long cTime = 1425016501000L;
    entity.setCreatedTime(cTime);
    Map<String, Object> infoMap = new HashMap<String, Object>();
    infoMap.put("infoMapKey1", "infoMapValue1");
    infoMap.put("infoMapKey2", 10);
    entity.addInfo(infoMap);
    // isRelatedTo: one key ("task") with a single related entity id.
    String key = "task";
    String value = "is_related_to_entity_id_here";
    Set<String> isRelatedToSet = new HashSet<String>();
    isRelatedToSet.add(value);
    Map<String, Set<String>> isRelatedTo = new HashMap<String, Set<String>>();
    isRelatedTo.put(key, isRelatedToSet);
    entity.setIsRelatedToEntities(isRelatedTo);
    // relatesTo: one key ("container") with two related entity ids.
    key = "container";
    value = "relates_to_entity_id_here";
    Set<String> relatesToSet = new HashSet<String>();
    relatesToSet.add(value);
    value = "relates_to_entity_id_here_Second";
    relatesToSet.add(value);
    Map<String, Set<String>> relatesTo = new HashMap<String, Set<String>>();
    relatesTo.put(key, relatesToSet);
    entity.setRelatesToEntities(relatesTo);
    Map<String, String> conf = new HashMap<String, String>();
    conf.put("config_param1", "value1");
    conf.put("config_param2", "value2");
    entity.addConfigs(conf);
    // Time-series metric with six data points relative to CURRENT_TIME.
    Set<TimelineMetric> metrics = new HashSet<>();
    TimelineMetric m1 = new TimelineMetric();
    m1.setId("MAP_SLOT_MILLIS");
    Map<Long, Number> metricValues = new HashMap<Long, Number>();
    metricValues.put(CURRENT_TIME - 120000, 100000000);
    metricValues.put(CURRENT_TIME - 100000, 200000000);
    metricValues.put(CURRENT_TIME - 80000, 300000000);
    metricValues.put(CURRENT_TIME - 60000, 400000000);
    metricValues.put(CURRENT_TIME - 40000, 50000000000L);
    metricValues.put(CURRENT_TIME - 20000, 60000000000L);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    entity.addMetrics(metrics);
    // Aggregated SINGLE_VALUE metric (SUM op) attached to the same entity.
    TimelineEntity aggEntity = new TimelineEntity();
    String type = TimelineEntityType.YARN_APPLICATION.toString();
    aggEntity.setId(appId);
    aggEntity.setType(type);
    long cTime2 = 1425016502000L;
    aggEntity.setCreatedTime(cTime2);
    TimelineMetric aggMetric = new TimelineMetric();
    aggMetric.setId("MEM_USAGE");
    Map<Long, Number> aggMetricValues = new HashMap<Long, Number>();
    long aggTs = CURRENT_TIME;
    aggMetricValues.put(aggTs - 120000, 102400000L);
    aggMetric.setType(Type.SINGLE_VALUE);
    aggMetric.setRealtimeAggregationOp(TimelineMetricOperation.SUM);
    aggMetric.setValues(aggMetricValues);
    Set<TimelineMetric> aggMetrics = new HashSet<>();
    aggMetrics.add(aggMetric);
    entity.addMetrics(aggMetrics);
    te.addEntity(entity);
    HBaseTimelineWriterImpl hbi = null;
    try {
        Configuration c1 = util.getConfiguration();
        hbi = new HBaseTimelineWriterImpl();
        hbi.init(c1);
        hbi.start();
        String cluster = "cluster_test_write_app";
        String user = "user1";
        // Flow name deliberately contains characters that need encoding.
        String flow = "s!ome_f\tlow  _n am!e";
        String flowVersion = "AB7822C10F1111";
        long runid = 1002345678919L;
        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, appId), te, UserGroupInformation.createRemoteUser(user));
        // Second write for the same app id adds an extra info key; reads below
        // must see the merged info map.
        entity = new ApplicationEntity();
        appId = "application_1000178881110_2002";
        entity.setId(appId);
        Map<String, Object> infoMap1 = new HashMap<>();
        infoMap1.put("infoMapKey3", "infoMapValue1");
        entity.addInfo(infoMap1);
        te = new TimelineEntities();
        te.addEntity(entity);
        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, appId), te, UserGroupInformation.createRemoteUser(user));
        hbi.stop();
        infoMap.putAll(infoMap1);
        // --- Direct row verification against the application table.
        ApplicationRowKey applicationRowKey = new ApplicationRowKey(cluster, user, flow, runid, appId);
        byte[] rowKey = applicationRowKey.getRowKey();
        Get get = new Get(rowKey);
        get.setMaxVersions(Integer.MAX_VALUE);
        Result result;
        // Close the HBase connection when done; it was previously leaked.
        try (Connection conn = ConnectionFactory.createConnection(c1)) {
            result = new ApplicationTableRW().getResult(c1, conn, get);
        }
        assertNotNull(result);
        assertEquals(17, result.size());
        byte[] row1 = result.getRow();
        assertTrue(isApplicationRowKeyCorrect(row1, cluster, user, flow, runid, appId));
        String id1 = ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString();
        assertEquals(appId, id1);
        Long cTime1 = (Long) ColumnRWHelper.readResult(result, ApplicationColumn.CREATED_TIME);
        assertEquals(cTime, cTime1);
        Map<String, Object> infoColumns = ColumnRWHelper.readResults(result, ApplicationColumnPrefix.INFO, new StringKeyConverter());
        assertEquals(infoMap, infoColumns);
        // Relation values are stored as separator-joined compound strings.
        for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo.entrySet()) {
            Object isRelatedToValue = ColumnRWHelper.readResult(result, ApplicationColumnPrefix.IS_RELATED_TO, isRelatedToEntry.getKey());
            String compoundValue = isRelatedToValue.toString();
            Set<String> isRelatedToValues = new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
            assertEquals(isRelatedTo.get(isRelatedToEntry.getKey()).size(), isRelatedToValues.size());
            for (String v : isRelatedToEntry.getValue()) {
                assertTrue(isRelatedToValues.contains(v));
            }
        }
        for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo.entrySet()) {
            String compoundValue = ColumnRWHelper.readResult(result, ApplicationColumnPrefix.RELATES_TO, relatesToEntry.getKey()).toString();
            Set<String> relatesToValues = new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
            assertEquals(relatesTo.get(relatesToEntry.getKey()).size(), relatesToValues.size());
            for (String v : relatesToEntry.getValue()) {
                assertTrue(relatesToValues.contains(v));
            }
        }
        KeyConverter<String> stringKeyConverter = new StringKeyConverter();
        Map<String, Object> configColumns = ColumnRWHelper.readResults(result, ApplicationColumnPrefix.CONFIG, stringKeyConverter);
        assertEquals(conf, configColumns);
        NavigableMap<String, NavigableMap<Long, Number>> metricsResult = ColumnRWHelper.readResultsWithTimestamps(result, ApplicationColumnPrefix.METRIC, stringKeyConverter);
        NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
        matchMetrics(metricValues, metricMap);
        // --- Reader verification 1: Field.ALL, metricsLimit = MAX_VALUE
        // (full time series expected).
        TimelineEntity e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow, runid, appId, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(TimelineReader.Field.ALL), Integer.MAX_VALUE, null, null));
        assertNotNull(e1);
        assertEquals(appId, e1.getId());
        assertEquals(TimelineEntityType.YARN_APPLICATION.toString(), e1.getType());
        assertEquals(cTime, e1.getCreatedTime());
        // FROM_ID is reader-generated metadata, not part of the written info map.
        Map<String, Object> infoMap2 = e1.getInfo();
        infoMap2.remove("FROM_ID");
        assertEquals(infoMap, infoMap2);
        Map<String, Set<String>> isRelatedTo2 = e1.getIsRelatedToEntities();
        assertEquals(isRelatedTo, isRelatedTo2);
        Map<String, Set<String>> relatesTo2 = e1.getRelatesToEntities();
        assertEquals(relatesTo, relatesTo2);
        Map<String, String> conf2 = e1.getConfigs();
        assertEquals(conf, conf2);
        Set<TimelineMetric> metrics2 = e1.getMetrics();
        assertEquals(2, metrics2.size());
        for (TimelineMetric metric2 : metrics2) {
            Map<Long, Number> metricValues2 = metric2.getValues();
            assertTrue(metric2.getId().equals("MAP_SLOT_MILLIS") || metric2.getId().equals("MEM_USAGE"));
            if (metric2.getId().equals("MAP_SLOT_MILLIS")) {
                assertEquals(6, metricValues2.size());
                matchMetrics(metricValues, metricValues2);
            }
            if (metric2.getId().equals("MEM_USAGE")) {
                assertEquals(1, metricValues2.size());
                matchMetrics(aggMetricValues, metricValues2);
            }
        }
        // --- Reader verification 2: metricsLimit = 3 caps each series at 3 points.
        e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow, runid, appId, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(TimelineReader.Field.ALL), 3, null, null));
        assertNotNull(e1);
        assertEquals(appId, e1.getId());
        assertEquals(TimelineEntityType.YARN_APPLICATION.toString(), e1.getType());
        assertEquals(conf, e1.getConfigs());
        metrics2 = e1.getMetrics();
        assertEquals(2, metrics2.size());
        for (TimelineMetric metric2 : metrics2) {
            Map<Long, Number> metricValues2 = metric2.getValues();
            assertTrue(metricValues2.size() <= 3);
            assertTrue(metric2.getId().equals("MAP_SLOT_MILLIS") || metric2.getId().equals("MEM_USAGE"));
        }
        // --- Reader verification 3: null metricsLimit collapses each metric to
        // a single (latest) value.
        e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow, runid, appId, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(TimelineReader.Field.ALL), null, null, null));
        assertNotNull(e1);
        assertEquals(appId, e1.getId());
        assertEquals(TimelineEntityType.YARN_APPLICATION.toString(), e1.getType());
        assertEquals(cTime, e1.getCreatedTime());
        infoMap2 = e1.getInfo();
        infoMap2.remove("FROM_ID");
        assertEquals(infoMap, e1.getInfo());
        assertEquals(isRelatedTo, e1.getIsRelatedToEntities());
        assertEquals(relatesTo, e1.getRelatesToEntities());
        assertEquals(conf, e1.getConfigs());
        assertEquals(2, e1.getMetrics().size());
        for (TimelineMetric metric : e1.getMetrics()) {
            assertEquals(1, metric.getValues().size());
            assertEquals(TimelineMetric.Type.SINGLE_VALUE, metric.getType());
            assertTrue(metric.getId().equals("MAP_SLOT_MILLIS") || metric.getId().equals("MEM_USAGE"));
            if (metric.getId().equals("MAP_SLOT_MILLIS")) {
                // The single retained point must be the newest one.
                assertTrue(metric.getValues().containsKey(CURRENT_TIME - 20000));
                assertEquals(metricValues.get(CURRENT_TIME - 20000), metric.getValues().get(CURRENT_TIME - 20000));
            }
            if (metric.getId().equals("MEM_USAGE")) {
                assertTrue(metric.getValues().containsKey(aggTs - 120000));
                assertEquals(aggMetricValues.get(aggTs - 120000), metric.getValues().get(aggTs - 120000));
            }
        }
    } finally {
        if (hbi != null) {
            hbi.stop();
            hbi.close();
        }
    }
}
183450.1344204wildfly
/**
 * Parses a single {@code <datasource>} element from a datasources 4.0 schema
 * document into a management ADD operation (plus one child ADD operation per
 * nested {@code connection-property}), all appended to {@code list}.
 *
 * <p>The datasource ADD operation is appended first, followed by the
 * connection-property operations, and only once the matching end tag for the
 * datasource element has been consumed.
 *
 * @param reader        stream reader positioned on the datasource start element
 * @param list          receives the datasource ADD operation followed by any
 *                      connection-property ADD operations
 * @param parentAddress address of the parent resource; the datasource address
 *                      is built by appending {@code data-source=<pool-name>}
 * @throws XMLStreamException on underlying XML read errors
 * @throws ParserException    on unexpected elements/end tags or premature end of document
 * @throws ValidateException  propagated from attribute/element parsing
 */
private void parseDataSource_4_0(final XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    // ADD operation being populated; its address is set after the attribute
    // pass, once pool-name is known.
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // --- Pass 1: element attributes. Optional attributes are only set when
    // present; unknown attributes are rejected unless they match the
    // statistics-enabled attribute handled in the default branch.
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final DataSource.Attribute attribute = DataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    // Required attribute: set unconditionally, no null guard.
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // Captured locally: pool-name forms the resource address,
                    // not an operation parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JTA:
                {
                    final String value = rawAttributeText(reader, JTA.getXmlName());
                    if (value != null) {
                        JTA.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case CONNECTABLE:
                {
                    final String value = rawAttributeText(reader, CONNECTABLE.getXmlName());
                    if (value != null) {
                        CONNECTABLE.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case MCP:
                {
                    final String value = rawAttributeText(reader, MCP.getXmlName());
                    if (value != null) {
                        MCP.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case ENLISTMENT_TRACE:
                {
                    // NOTE(review): unlike every other optional attribute in
                    // this switch, there is no null guard here, so a null
                    // value is passed straight to parseAndSetParameter —
                    // confirm whether that is intentional.
                    final String value = rawAttributeText(reader, ENLISTMENT_TRACE.getXmlName());
                    ENLISTMENT_TRACE.parseAndSetParameter(value, operation, reader);
                    break;
                }
            case TRACKING:
                {
                    final String value = rawAttributeText(reader, TRACKING.getXmlName());
                    if (value != null) {
                        TRACKING.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // statistics-enabled is not part of the DataSource.Attribute
                // enum, so it is matched by local name here; anything else is
                // an error.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Build and freeze the datasource address now that pool-name is known.
    // NOTE(review): poolName may still be null here if the attribute was
    // absent — no explicit validation is performed before using it.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(DATA_SOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    List<ModelNode> configPropertiesOperations = new ArrayList<ModelNode>(0);
    // --- Pass 2: child elements, until the matching </datasource> end tag.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.DATASOURCE) {
                        // End of this datasource: emit the main ADD followed
                        // by the collected connection-property ADDs.
                        list.add(operation);
                        list.addAll(configPropertiesOperations);
                        return;
                    } else {
                        if (DataSource.Tag.forName(reader.getLocalName()) == DataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(DataSource.Tag.forName(reader.getLocalName())) {
                        case CONNECTION_PROPERTY:
                            {
                                // Each connection-property becomes its own
                                // child-resource ADD, queued until the parent
                                // operation is emitted.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(CONNECTION_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                CONNECTION_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                configPropertiesOperations.add(configOperation);
                                break;
                            }
                        case CONNECTION_URL:
                            {
                                String value = rawElementText(reader);
                                CONNECTION_URL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER_CLASS:
                            {
                                String value = rawElementText(reader);
                                DRIVER_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case POOL:
                            {
                                parsePool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                // The security element's shape depends on the
                                // document namespace: legacy 4.0 parsing vs.
                                // the 5.0+ variant for everything else.
                                switch(Namespace.forUri(reader.getNamespaceURI())) {
                                    case DATASOURCES_4_0:
                                        parseDsSecurity(reader, operation);
                                        break;
                                    default:
                                        parseDsSecurity_5_0(reader, operation);
                                        break;
                                }
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Reached only if the document ends before </datasource>.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
183516.3647208wildfly
/**
 * Parses a single {@code <datasource>} element from a datasources 7.0 schema
 * document into a management ADD operation (plus one child ADD operation per
 * nested {@code connection-property}), all appended to {@code list}.
 *
 * <p>Structurally parallel to {@code parseDataSource_4_0}; the differences are
 * in nested-element dispatch: the {@code security} element distinguishes the
 * 5.0/6.0/7.0 namespaces from later ones, and {@code validation} uses the 7.0
 * parsing variant.
 *
 * @param reader        stream reader positioned on the datasource start element
 * @param list          receives the datasource ADD operation followed by any
 *                      connection-property ADD operations
 * @param parentAddress address of the parent resource; the datasource address
 *                      is built by appending {@code data-source=<pool-name>}
 * @throws XMLStreamException on underlying XML read errors
 * @throws ParserException    on unexpected elements/end tags or premature end of document
 * @throws ValidateException  propagated from attribute/element parsing
 */
private void parseDataSource_7_0(final XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    // ADD operation being populated; its address is set after the attribute
    // pass, once pool-name is known.
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // --- Pass 1: element attributes. Optional attributes are only set when
    // present; unknown attributes are rejected unless they match the
    // statistics-enabled attribute handled in the default branch.
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final DataSource.Attribute attribute = DataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    // Required attribute: set unconditionally, no null guard.
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // Captured locally: pool-name forms the resource address,
                    // not an operation parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JTA:
                {
                    final String value = rawAttributeText(reader, JTA.getXmlName());
                    if (value != null) {
                        JTA.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case CONNECTABLE:
                {
                    final String value = rawAttributeText(reader, CONNECTABLE.getXmlName());
                    if (value != null) {
                        CONNECTABLE.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case MCP:
                {
                    final String value = rawAttributeText(reader, MCP.getXmlName());
                    if (value != null) {
                        MCP.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case ENLISTMENT_TRACE:
                {
                    // NOTE(review): unlike every other optional attribute in
                    // this switch, there is no null guard here, so a null
                    // value is passed straight to parseAndSetParameter —
                    // confirm whether that is intentional.
                    final String value = rawAttributeText(reader, ENLISTMENT_TRACE.getXmlName());
                    ENLISTMENT_TRACE.parseAndSetParameter(value, operation, reader);
                    break;
                }
            case TRACKING:
                {
                    final String value = rawAttributeText(reader, TRACKING.getXmlName());
                    if (value != null) {
                        TRACKING.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // statistics-enabled is not part of the DataSource.Attribute
                // enum, so it is matched by local name here; anything else is
                // an error.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Build and freeze the datasource address now that pool-name is known.
    // NOTE(review): poolName may still be null here if the attribute was
    // absent — no explicit validation is performed before using it.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(DATA_SOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    List<ModelNode> configPropertiesOperations = new ArrayList<ModelNode>(0);
    // --- Pass 2: child elements, until the matching </datasource> end tag.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.DATASOURCE) {
                        // End of this datasource: emit the main ADD followed
                        // by the collected connection-property ADDs.
                        list.add(operation);
                        list.addAll(configPropertiesOperations);
                        return;
                    } else {
                        if (DataSource.Tag.forName(reader.getLocalName()) == DataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(DataSource.Tag.forName(reader.getLocalName())) {
                        case CONNECTION_PROPERTY:
                            {
                                // Each connection-property becomes its own
                                // child-resource ADD, queued until the parent
                                // operation is emitted.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(CONNECTION_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                CONNECTION_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                configPropertiesOperations.add(configOperation);
                                break;
                            }
                        case CONNECTION_URL:
                            {
                                String value = rawElementText(reader);
                                CONNECTION_URL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER_CLASS:
                            {
                                String value = rawElementText(reader);
                                DRIVER_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case POOL:
                            {
                                parsePool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                // The security element's shape depends on the
                                // document namespace: 4.0 legacy parsing,
                                // 5.0–7.0 variant, and the 7.1+ variant for
                                // everything newer.
                                switch(Namespace.forUri(reader.getNamespaceURI())) {
                                    case DATASOURCES_4_0:
                                        parseDsSecurity(reader, operation);
                                        break;
                                    case DATASOURCES_5_0:
                                    case DATASOURCES_6_0:
                                    case DATASOURCES_7_0:
                                        parseDsSecurity_5_0(reader, operation);
                                        break;
                                    default:
                                        parseDsSecurity_7_1(reader, operation);
                                }
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                // 7.0 schema uses its own validation parser.
                                parseValidationSetting_7_0(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Reached only if the document ends before </datasource>.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
193789.4936202cassandra
/**
 * Runs one fuzzing round: replays an identical sequence of randomly chosen
 * write operations against two output-plus implementations ({@code dosp} and
 * {@code ndosp}) until roughly 8 MiB has been generated, then compares their
 * outputs via {@code assertSameOutput}.
 *
 * <p>Each loop iteration picks one of 21 actions (flush, raw byte/array/range
 * writes, every primitive write, legacy and modern UTF encodings, heap and
 * direct ByteBuffers, Memory ranges, and vint encodings) and applies it to
 * both streams with the same arguments, so the two byte sequences must match.
 * The exact order of calls on the shared Random {@code r} is significant:
 * both streams must see identical data.
 *
 * @throws Exception propagated from setUp and the underlying stream writes
 */
private void fuzzOnce() throws Exception {
    setUp();
    int iteration = 0;
    // NOTE(review): bytesChecked is declared but never updated or read after
    // initialization — appears vestigial.
    int bytesChecked = 0;
    int action = 0;
    // Keep generating until ~8 MiB of output has accumulated.
    while (generated.size() < 1024 * 1024 * 8) {
        action = r.nextInt(21);
        iteration++;
        switch(action) {
            case 0:
                {
                    // Flush both sides (generated backs ndosp — TODO confirm).
                    generated.flush();
                    dosp.flush();
                    break;
                }
            case 1:
                {
                    // Single-byte write (only the low 8 bits are significant).
                    int val = r.nextInt();
                    dosp.write(val);
                    ndosp.write(val);
                    break;
                }
            case 2:
                {
                    // Whole-array write, 0..8192 bytes.
                    byte[] randomBytes = new byte[r.nextInt(4096 * 2 + 1)];
                    r.nextBytes(randomBytes);
                    dosp.write(randomBytes);
                    ndosp.write(randomBytes);
                    break;
                }
            case 3:
                {
                    // Sub-range write with random offset/length (both valid by
                    // construction, including the empty-array case).
                    byte[] randomBytes = new byte[r.nextInt(4096 * 2 + 1)];
                    r.nextBytes(randomBytes);
                    int offset = randomBytes.length == 0 ? 0 : r.nextInt(randomBytes.length);
                    int length = randomBytes.length == 0 ? 0 : r.nextInt(randomBytes.length - offset);
                    dosp.write(randomBytes, offset, length);
                    ndosp.write(randomBytes, offset, length);
                    break;
                }
            case 4:
                {
                    boolean val = r.nextInt(2) == 0;
                    dosp.writeBoolean(val);
                    ndosp.writeBoolean(val);
                    break;
                }
            case 5:
                {
                    int val = r.nextInt();
                    dosp.writeByte(val);
                    ndosp.writeByte(val);
                    break;
                }
            case 6:
                {
                    int val = r.nextInt();
                    dosp.writeShort(val);
                    ndosp.writeShort(val);
                    break;
                }
            case 7:
                {
                    int val = r.nextInt();
                    dosp.writeChar(val);
                    ndosp.writeChar(val);
                    break;
                }
            case 8:
                {
                    int val = r.nextInt();
                    dosp.writeInt(val);
                    ndosp.writeInt(val);
                    break;
                }
            case 9:
                {
                    // NOTE(review): the long written here is sourced from
                    // nextInt(), so only the 32-bit value range is exercised —
                    // confirm whether nextLong() was intended.
                    int val = r.nextInt();
                    dosp.writeLong(val);
                    ndosp.writeLong(val);
                    break;
                }
            case 10:
                {
                    float val = r.nextFloat();
                    dosp.writeFloat(val);
                    ndosp.writeFloat(val);
                    break;
                }
            case 11:
                {
                    double val = r.nextDouble();
                    dosp.writeDouble(val);
                    ndosp.writeDouble(val);
                    break;
                }
            case 12:
                {
                    // Fixed ASCII fixture string as raw bytes.
                    dosp.writeBytes(simple);
                    ndosp.writeBytes(simple);
                    break;
                }
            case 13:
                {
                    // Fixed two-byte-UTF fixture string as chars.
                    dosp.writeChars(twoByte);
                    ndosp.writeChars(twoByte);
                    break;
                }
            case 14:
                {
                    // UTF write of a random string mixing 1/2/3/4-byte UTF-8
                    // sequences; ~5% of the time the string is made long
                    // enough to exercise large-payload paths. Legacy encoder
                    // feeds dosp so both encodings can be compared.
                    StringBuilder sb = new StringBuilder();
                    int length = r.nextInt(500);
                    if (r.nextDouble() > .95)
                        length += 4000;
                    sb.append(simple + twoByte + threeByte + fourByte);
                    for (int ii = 0; ii < length; ii++) {
                        sb.append((char) (r.nextInt() & 0xffff));
                    }
                    String str = sb.toString();
                    writeUTFLegacy(str, dosp);
                    ndosp.writeUTF(str);
                    break;
                }
            case 15:
                {
                    // UTF write of a pure-ASCII string of variable length.
                    StringBuilder sb = new StringBuilder();
                    int length = r.nextInt(500);
                    sb.append("the very model of a modern major general familiar with all things animal vegetable and mineral");
                    for (int ii = 0; ii < length; ii++) {
                        sb.append(' ');
                    }
                    String str = sb.toString();
                    writeUTFLegacy(str, dosp);
                    ndosp.writeUTF(str);
                    break;
                }
            case 16:
                {
                    // Heap ByteBuffer with random position/limit; duplicates
                    // are written so the asserts can verify write() does not
                    // disturb the source buffer's position/limit.
                    ByteBuffer buf = ByteBuffer.allocate(r.nextInt(1024 * 8 + 1));
                    r.nextBytes(buf.array());
                    buf.position(buf.capacity() == 0 ? 0 : r.nextInt(buf.capacity()));
                    buf.limit(buf.position() + (buf.capacity() - buf.position() == 0 ? 0 : r.nextInt(buf.capacity() - buf.position())));
                    ByteBuffer dup = buf.duplicate();
                    ndosp.write(buf.duplicate());
                    assertEquals(dup.position(), buf.position());
                    assertEquals(dup.limit(), buf.limit());
                    dosp.write(buf.duplicate());
                    break;
                }
            case 17:
                {
                    // Same as case 16 but with a direct (off-heap) buffer,
                    // filled byte-by-byte since there is no backing array.
                    ByteBuffer buf = ByteBuffer.allocateDirect(r.nextInt(1024 * 8 + 1));
                    while (buf.hasRemaining()) buf.put((byte) r.nextInt());
                    buf.position(buf.capacity() == 0 ? 0 : r.nextInt(buf.capacity()));
                    buf.limit(buf.position() + (buf.capacity() - buf.position() == 0 ? 0 : r.nextInt(buf.capacity() - buf.position())));
                    ByteBuffer dup = buf.duplicate();
                    ndosp.write(buf.duplicate());
                    assertEquals(dup.position(), buf.position());
                    assertEquals(dup.limit(), buf.limit());
                    dosp.write(buf.duplicate());
                    break;
                }
            case 18:
                {
                    // Random sub-range of a native Memory region (size >= 1);
                    // try-with-resources releases the native allocation.
                    try (Memory buf = Memory.allocate(r.nextInt(1024 * 8 - 1) + 1)) {
                        for (int ii = 0; ii < buf.size(); ii++) buf.setByte(ii, (byte) r.nextInt());
                        long offset = buf.size() == 0 ? 0 : r.nextInt((int) buf.size());
                        long length = (buf.size() - offset == 0 ? 0 : r.nextInt((int) (buf.size() - offset)));
                        ndosp.write(buf, offset, length);
                        dosp.write(buf, offset, length);
                    }
                    break;
                }
            case 19:
                {
                    // Signed variable-length int encoding.
                    long val = r.nextLong();
                    VIntCoding.writeVInt(val, dosp);
                    ndosp.writeVInt(val);
                    break;
                }
            case 20:
                {
                    // Unsigned variable-length int encoding.
                    long val = r.nextLong();
                    VIntCoding.writeUnsignedVInt(val, dosp);
                    ndosp.writeUnsignedVInt(val);
                    break;
                }
            default:
                fail("Shouldn't reach here");
        }
    }
    // Compare everything written by both streams from offset 0 to the end.
    assertSameOutput(0, -1, iteration);
}
193703.8955159elasticsearch
/**
 * Constant-folds a comparison node when both operands are already constants (or null constants).
 *
 * Children are visited first so nested expressions are folded bottom-up. If either operand is not
 * a {@link ConstantNode} or {@link NullNode} the comparison is left untouched. Equality operators
 * (EQ/EQR/NE/NER) can fold even when one or both sides are null; relational operators (GT/GTE/LT/LTE)
 * fold only when both sides are non-null constants and throw for non-numeric comparison types.
 *
 * Semantics preserved from the branch-per-type original:
 * - boolean/int/long/float/double compare by primitive operators (so NaN != NaN for floats/doubles)
 * - EQ/NE on other types use {@code equals}; EQR/NER use reference identity
 */
public void visitComparison(ComparisonNode irComparisonNode, Consumer<ExpressionNode> scope) {
    // Fold children first; the consumers let a child replace itself in the parent.
    irComparisonNode.getLeftNode().visit(this, irComparisonNode::setLeftNode);
    irComparisonNode.getRightNode().visit(this, irComparisonNode::setRightNode);

    ExpressionNode irLeftNode = irComparisonNode.getLeftNode();
    ExpressionNode irRightNode = irComparisonNode.getRightNode();
    boolean leftFoldable = irLeftNode instanceof ConstantNode || irLeftNode instanceof NullNode;
    boolean rightFoldable = irRightNode instanceof ConstantNode || irRightNode instanceof NullNode;

    if (leftFoldable == false || rightFoldable == false) {
        return;
    }

    // A null node has no constant decoration; represent it as a null node reference here.
    ExpressionNode irLeftConstantNode = irLeftNode instanceof NullNode ? null : irLeftNode;
    ExpressionNode irRightConstantNode = irRightNode instanceof NullNode ? null : irRightNode;
    Object leftConstantValue = irLeftConstantNode == null ? null : irLeftConstantNode.getDecorationValue(IRDConstant.class);
    Object rightConstantValue = irRightConstantNode == null ? null : irRightConstantNode.getDecorationValue(IRDConstant.class);
    Operation operation = irComparisonNode.getDecorationValue(IRDOperation.class);
    Class<?> type = irComparisonNode.getDecorationValue(IRDComparisonType.class);

    if (operation == Operation.EQ || operation == Operation.EQR || operation == Operation.NE || operation == Operation.NER) {
        ExpressionNode irResultNode;
        boolean equal;

        if (irLeftConstantNode == null && irRightConstantNode == null) {
            // null compared to null: equal; a fresh node is needed since neither side carries decorations
            irResultNode = new ConstantNode(irComparisonNode.getLeftNode().getLocation());
            equal = true;
        } else if (irLeftConstantNode == null || irRightConstantNode == null) {
            // null compared to a non-null constant: never equal
            irResultNode = new ConstantNode(irComparisonNode.getLeftNode().getLocation());
            equal = false;
        } else {
            // both sides constant: reuse the left node as the folded result
            irResultNode = irLeftConstantNode;
            if (type == boolean.class) {
                equal = (boolean) leftConstantValue == (boolean) rightConstantValue;
            } else if (type == int.class) {
                equal = (int) leftConstantValue == (int) rightConstantValue;
            } else if (type == long.class) {
                equal = (long) leftConstantValue == (long) rightConstantValue;
            } else if (type == float.class) {
                equal = (float) leftConstantValue == (float) rightConstantValue;
            } else if (type == double.class) {
                equal = (double) leftConstantValue == (double) rightConstantValue;
            } else if (operation == Operation.EQ || operation == Operation.NE) {
                equal = leftConstantValue.equals(rightConstantValue);
            } else {
                // EQR/NER: reference equality for non-primitive comparison types
                equal = leftConstantValue == rightConstantValue;
            }
        }

        // NE/NER are the negation of the corresponding equality result
        boolean result = (operation == Operation.EQ || operation == Operation.EQR) ? equal : equal == false;
        irResultNode.attachDecoration(new IRDConstant(result));
        irResultNode.attachDecoration(new IRDExpressionType(boolean.class));
        scope.accept(irResultNode);
    } else if (irLeftConstantNode != null
        && irRightConstantNode != null
        && (operation == Operation.GT || operation == Operation.GTE || operation == Operation.LT || operation == Operation.LTE)) {

        Boolean folded = foldRelational(operation, type, leftConstantValue, rightConstantValue);

        if (folded == null) {
            // relational comparison on a non-numeric type is a compile-time error
            throw irComparisonNode.getLocation().createError(comparisonError(PainlessLookupUtility.typeToCanonicalTypeName(type), operation.symbol, irLeftConstantNode.getDecorationString(IRDConstant.class), irRightConstantNode.getDecorationString(IRDConstant.class)));
        }

        irLeftConstantNode.attachDecoration(new IRDConstant(folded));
        irLeftConstantNode.attachDecoration(new IRDExpressionType(boolean.class));
        scope.accept(irLeftConstantNode);
    }
}

/**
 * Evaluates a relational comparison (GT/GTE/LT/LTE) between two boxed numeric constants of the
 * given comparison type using the matching primitive operator (NaN semantics preserved for
 * float/double).
 *
 * @return the folded boolean result, or {@code null} if {@code type} is not a supported numeric
 *         primitive (int/long/float/double) so the caller can raise a location-specific error
 */
private static Boolean foldRelational(Operation operation, Class<?> type, Object leftConstantValue, Object rightConstantValue) {
    if (type == int.class) {
        int left = (int) leftConstantValue;
        int right = (int) rightConstantValue;
        if (operation == Operation.GT) return left > right;
        if (operation == Operation.GTE) return left >= right;
        if (operation == Operation.LT) return left < right;
        return left <= right;
    } else if (type == long.class) {
        long left = (long) leftConstantValue;
        long right = (long) rightConstantValue;
        if (operation == Operation.GT) return left > right;
        if (operation == Operation.GTE) return left >= right;
        if (operation == Operation.LT) return left < right;
        return left <= right;
    } else if (type == float.class) {
        float left = (float) leftConstantValue;
        float right = (float) rightConstantValue;
        if (operation == Operation.GT) return left > right;
        if (operation == Operation.GTE) return left >= right;
        if (operation == Operation.LT) return left < right;
        return left <= right;
    } else if (type == double.class) {
        double left = (double) leftConstantValue;
        double right = (double) rightConstantValue;
        if (operation == Operation.GT) return left > right;
        if (operation == Operation.GTE) return left >= right;
        if (operation == Operation.LT) return left < right;
        return left <= right;
    }
    return null;
}
195257.143308elasticsearch
/**
 * Builds the {@link ValuesSourceRegistry} by registering every built-in aggregation, the
 * deprecated V7 common-terms query when V7 REST compatibility is active, and any aggregations
 * or registry extensions contributed by {@link SearchPlugin}s.
 *
 * @param plugins installed search plugins whose aggregations/extensions are folded in last
 * @return the fully-populated, immutable values-source registry
 */
private ValuesSourceRegistry registerAggregations(List<SearchPlugin> plugins) {
    ValuesSourceRegistry.Builder registryBuilder = new ValuesSourceRegistry.Builder(telemetryProvider.getMeterRegistry());

    // --- metric aggregations ---
    registerAggregation(new AggregationSpec(AvgAggregationBuilder.NAME, AvgAggregationBuilder::new, AvgAggregationBuilder.PARSER).addResultReader(InternalAvg::new).setAggregatorRegistrar(AvgAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(WeightedAvgAggregationBuilder.NAME, WeightedAvgAggregationBuilder::new, WeightedAvgAggregationBuilder.PARSER).addResultReader(InternalWeightedAvg::new).setAggregatorRegistrar(WeightedAvgAggregationBuilder::registerUsage), registryBuilder);
    registerAggregation(new AggregationSpec(SumAggregationBuilder.NAME, SumAggregationBuilder::new, SumAggregationBuilder.PARSER).addResultReader(Sum::new).setAggregatorRegistrar(SumAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(MinAggregationBuilder.NAME, MinAggregationBuilder::new, MinAggregationBuilder.PARSER).addResultReader(Min::new).setAggregatorRegistrar(MinAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(MaxAggregationBuilder.NAME, MaxAggregationBuilder::new, MaxAggregationBuilder.PARSER).addResultReader(Max::new).setAggregatorRegistrar(MaxAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(StatsAggregationBuilder.NAME, StatsAggregationBuilder::new, StatsAggregationBuilder.PARSER).addResultReader(InternalStats::new).setAggregatorRegistrar(StatsAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(ExtendedStatsAggregationBuilder.NAME, ExtendedStatsAggregationBuilder::new, ExtendedStatsAggregationBuilder.PARSER).addResultReader(InternalExtendedStats::new).setAggregatorRegistrar(ExtendedStatsAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(ValueCountAggregationBuilder.NAME, ValueCountAggregationBuilder::new, ValueCountAggregationBuilder.PARSER).addResultReader(InternalValueCount::new).setAggregatorRegistrar(ValueCountAggregationBuilder::registerAggregators), registryBuilder);
    // percentiles expose two result encodings (t-digest and HDR), hence two result readers each
    registerAggregation(new AggregationSpec(PercentilesAggregationBuilder.NAME, PercentilesAggregationBuilder::new, PercentilesAggregationBuilder.PARSER).addResultReader(InternalTDigestPercentiles.NAME, InternalTDigestPercentiles::new).addResultReader(InternalHDRPercentiles.NAME, InternalHDRPercentiles::new).setAggregatorRegistrar(PercentilesAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(PercentileRanksAggregationBuilder.NAME, PercentileRanksAggregationBuilder::new, PercentileRanksAggregationBuilder.PARSER).addResultReader(InternalTDigestPercentileRanks.NAME, InternalTDigestPercentileRanks::new).addResultReader(InternalHDRPercentileRanks.NAME, InternalHDRPercentileRanks::new).setAggregatorRegistrar(PercentileRanksAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(MedianAbsoluteDeviationAggregationBuilder.NAME, MedianAbsoluteDeviationAggregationBuilder::new, MedianAbsoluteDeviationAggregationBuilder.PARSER).addResultReader(InternalMedianAbsoluteDeviation::new).setAggregatorRegistrar(MedianAbsoluteDeviationAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(CardinalityAggregationBuilder.NAME, CardinalityAggregationBuilder::new, CardinalityAggregationBuilder.PARSER).addResultReader(InternalCardinality::new).setAggregatorRegistrar(CardinalityAggregationBuilder::registerAggregators), registryBuilder);

    // --- bucket aggregations ---
    registerAggregation(new AggregationSpec(GlobalAggregationBuilder.NAME, GlobalAggregationBuilder::new, GlobalAggregationBuilder::parse).addResultReader(InternalGlobal::new), registryBuilder);
    registerAggregation(new AggregationSpec(MissingAggregationBuilder.NAME, MissingAggregationBuilder::new, MissingAggregationBuilder.PARSER).addResultReader(InternalMissing::new).setAggregatorRegistrar(MissingAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(FilterAggregationBuilder.NAME, FilterAggregationBuilder::new, FilterAggregationBuilder::parse).addResultReader(InternalFilter::new), registryBuilder);
    registerAggregation(new AggregationSpec(FiltersAggregationBuilder.NAME, FiltersAggregationBuilder::new, FiltersAggregationBuilder::parse).addResultReader(InternalFilters::new), registryBuilder);
    registerAggregation(new AggregationSpec(RandomSamplerAggregationBuilder.NAME, RandomSamplerAggregationBuilder::new, RandomSamplerAggregationBuilder.PARSER).addResultReader(InternalRandomSampler.NAME, InternalRandomSampler::new).setAggregatorRegistrar(usage -> usage.registerUsage(RandomSamplerAggregationBuilder.NAME)), registryBuilder);
    registerAggregation(new AggregationSpec(SamplerAggregationBuilder.NAME, SamplerAggregationBuilder::new, SamplerAggregationBuilder::parse).addResultReader(InternalSampler.NAME, InternalSampler::new).addResultReader(UnmappedSampler.NAME, UnmappedSampler::new), registryBuilder);
    registerAggregation(new AggregationSpec(DiversifiedAggregationBuilder.NAME, DiversifiedAggregationBuilder::new, DiversifiedAggregationBuilder.PARSER).setAggregatorRegistrar(DiversifiedAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(TermsAggregationBuilder.NAME, TermsAggregationBuilder::new, TermsAggregationBuilder.PARSER).addResultReader(StringTerms.NAME, StringTerms::new).addResultReader(UnmappedTerms.NAME, UnmappedTerms::new).addResultReader(LongTerms.NAME, LongTerms::new).addResultReader(DoubleTerms.NAME, DoubleTerms::new).setAggregatorRegistrar(TermsAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(RareTermsAggregationBuilder.NAME, RareTermsAggregationBuilder::new, RareTermsAggregationBuilder.PARSER).addResultReader(StringRareTerms.NAME, StringRareTerms::new).addResultReader(UnmappedRareTerms.NAME, UnmappedRareTerms::new).addResultReader(LongRareTerms.NAME, LongRareTerms::new).setAggregatorRegistrar(RareTermsAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(SignificantTermsAggregationBuilder.NAME, SignificantTermsAggregationBuilder::new, SignificantTermsAggregationBuilder::parse).addResultReader(SignificantStringTerms.NAME, SignificantStringTerms::new).addResultReader(SignificantLongTerms.NAME, SignificantLongTerms::new).addResultReader(UnmappedSignificantTerms.NAME, UnmappedSignificantTerms::new).setAggregatorRegistrar(SignificantTermsAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(SignificantTextAggregationBuilder.NAME, SignificantTextAggregationBuilder::new, SignificantTextAggregationBuilder::parse), registryBuilder);
    registerAggregation(new AggregationSpec(RangeAggregationBuilder.NAME, RangeAggregationBuilder::new, RangeAggregationBuilder.PARSER).addResultReader(InternalRange::new).setAggregatorRegistrar(RangeAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(DateRangeAggregationBuilder.NAME, DateRangeAggregationBuilder::new, DateRangeAggregationBuilder.PARSER).addResultReader(InternalDateRange::new).setAggregatorRegistrar(DateRangeAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(IpPrefixAggregationBuilder.NAME, IpPrefixAggregationBuilder::new, IpPrefixAggregationBuilder.PARSER).addResultReader(InternalIpPrefix::new).setAggregatorRegistrar(IpPrefixAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(IpRangeAggregationBuilder.NAME, IpRangeAggregationBuilder::new, IpRangeAggregationBuilder.PARSER).addResultReader(InternalBinaryRange::new).setAggregatorRegistrar(IpRangeAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(HistogramAggregationBuilder.NAME, HistogramAggregationBuilder::new, HistogramAggregationBuilder.PARSER).addResultReader(InternalHistogram::new).setAggregatorRegistrar(HistogramAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder.NAME, DateHistogramAggregationBuilder::new, DateHistogramAggregationBuilder.PARSER).addResultReader(InternalDateHistogram::new).setAggregatorRegistrar(DateHistogramAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(VariableWidthHistogramAggregationBuilder.NAME, VariableWidthHistogramAggregationBuilder::new, VariableWidthHistogramAggregationBuilder.PARSER).addResultReader(InternalVariableWidthHistogram::new).setAggregatorRegistrar(VariableWidthHistogramAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder.NAME, GeoDistanceAggregationBuilder::new, GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new).setAggregatorRegistrar(GeoDistanceAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder.PARSER).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, GeoTileGridAggregationBuilder.PARSER).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(NestedAggregationBuilder.NAME, NestedAggregationBuilder::new, NestedAggregationBuilder::parse).addResultReader(InternalNested::new), registryBuilder);
    registerAggregation(new AggregationSpec(ReverseNestedAggregationBuilder.NAME, ReverseNestedAggregationBuilder::new, ReverseNestedAggregationBuilder::parse).addResultReader(InternalReverseNested::new), registryBuilder);
    registerAggregation(new AggregationSpec(TopHitsAggregationBuilder.NAME, TopHitsAggregationBuilder::new, TopHitsAggregationBuilder::parse).addResultReader(InternalTopHits::new), registryBuilder);
    registerAggregation(new AggregationSpec(GeoBoundsAggregationBuilder.NAME, GeoBoundsAggregationBuilder::new, GeoBoundsAggregationBuilder.PARSER).addResultReader(InternalGeoBounds::new).setAggregatorRegistrar(GeoBoundsAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(GeoCentroidAggregationBuilder.NAME, GeoCentroidAggregationBuilder::new, GeoCentroidAggregationBuilder.PARSER).addResultReader(InternalGeoCentroid::new).setAggregatorRegistrar(GeoCentroidAggregationBuilder::registerAggregators), registryBuilder);
    registerAggregation(new AggregationSpec(ScriptedMetricAggregationBuilder.NAME, ScriptedMetricAggregationBuilder::new, ScriptedMetricAggregationBuilder.PARSER).addResultReader(InternalScriptedMetric::new), registryBuilder);
    registerAggregation(new AggregationSpec(CompositeAggregationBuilder.NAME, CompositeAggregationBuilder::new, CompositeAggregationBuilder.PARSER).addResultReader(InternalComposite::new).setAggregatorRegistrar(CompositeAggregationBuilder::registerAggregators), registryBuilder);

    // common-terms was removed in V8; keep the parser reachable only under V7 REST compatibility
    if (RestApiVersion.minimumSupported() == RestApiVersion.V_7) {
        registerQuery(new QuerySpec<>(CommonTermsQueryBuilder.NAME_V7, (streamInput) -> new CommonTermsQueryBuilder(), CommonTermsQueryBuilder::fromXContent));
    }

    // --- plugin-contributed aggregations and registry extensions ---
    registerFromPlugin(plugins, SearchPlugin::getAggregations, spec -> this.registerAggregation(spec, registryBuilder));
    registerFromPlugin(plugins, SearchPlugin::getAggregationExtentions, extension -> {
        // plugins may return null to indicate they have no registry extension
        if (extension != null) {
            extension.accept(registryBuilder);
        }
    });
    registerFromPlugin(plugins, SearchPlugin::getGenericNamedWriteables, this::registerGenericNamedWriteable);
    return registryBuilder.build();
}
191642.4425306elasticsearch
/**
 * Exercises {@code DiskThresholdMonitor}'s automatic flagging/release of the read-only-allow-delete
 * block: indices become read-only when a hosting node runs low on space and the block is released
 * once space is freed, while indices whose block was set manually (or on nodes with missing disk
 * usage info) are handled conservatively.
 *
 * @param testMaxHeadroom when true, disk sizes are GB-scale to exercise the max-headroom settings;
 *                        when false, a 100-byte disk exercises the percentage watermarks
 */
private void doTestAutoReleaseIndices(boolean testMaxHeadroom) {
    AtomicReference<Set<String>> indicesToMarkReadOnly = new AtomicReference<>();
    AtomicReference<Set<String>> indicesToRelease = new AtomicReference<>();
    AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
    // two indices, two primaries + one replica each, spread over two nodes => 8 started shards
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test_1").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(1)).put(IndexMetadata.builder("test_2").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(1)).build();
    RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test_1")).addAsNew(metadata.index("test_2")).build();
    final ClusterState clusterState = applyStartedShardsUntilNoChange(ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).nodes(DiscoveryNodes.builder().add(newNormalNode("node1")).add(newNormalNode("node2"))).build(), allocation);
    assertThat(shardsWithState(clusterState.getRoutingNodes(), ShardRoutingState.STARTED).size(), equalTo(8));
    final long totalBytes = testMaxHeadroom ? ByteSizeValue.ofGb(10000).getBytes() : 100;

    // reserved (in-flight recovery) space on both nodes, counted against free space by the monitor
    Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> reservedSpaces = new HashMap<>();
    final long reservedSpaceNode1 = randomBytes(testMaxHeadroom, 0, 150, 0, 10);
    reservedSpaces.put(new ClusterInfo.NodeAndPath("node1", "/foo/bar"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), reservedSpaceNode1).build());
    final long reservedSpaceNode2 = randomBytes(testMaxHeadroom, 0, 150, 0, 10);
    reservedSpaces.put(new ClusterInfo.NodeAndPath("node2", "/foo/bar"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), reservedSpaceNode2).build());

    DiskThresholdMonitor monitor = createCapturingMonitor(clusterState, indicesToMarkReadOnly, indicesToRelease);

    // both nodes over the flood stage => both indices flagged read-only
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    Map<String, DiskUsage> builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 0, 99, 0, 4)));
    builder.put("node2", diskUsage("node2", totalBytes, randomBytes(testMaxHeadroom, 0, 99, 0, 4)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indicesToMarkReadOnly.get());
    assertNull(indicesToRelease.get());

    // both nodes below the flood stage => nothing flagged, nothing released (no block exists yet)
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 100, 9850, 5, 90)));
    builder.put("node2", diskUsage("node2", totalBytes, randomBytes(testMaxHeadroom, 100, 9850, 5, 90)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertNull(indicesToMarkReadOnly.get());
    assertNull(indicesToRelease.get());

    // now simulate test_2 already carrying the read-only-allow-delete block
    IndexMetadata indexMetadata = IndexMetadata.builder(clusterState.metadata().index("test_2")).settings(Settings.builder().put(clusterState.metadata().index("test_2").getSettings()).put(IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true)).build();
    ClusterState clusterStateWithBlocks = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(indexMetadata, true).build()).blocks(ClusterBlocks.builder().addBlocks(indexMetadata).build()).build();
    assertTrue(clusterStateWithBlocks.blocks().indexBlocked(ClusterBlockLevel.WRITE, "test_2"));

    monitor = createCapturingMonitor(clusterStateWithBlocks, indicesToMarkReadOnly, indicesToRelease);

    // node2 over flood stage => only the not-yet-blocked index (test_1) is flagged
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 0, 10000, 0, 100)));
    builder.put("node2", diskUsage("node2", totalBytes, randomBytes(testMaxHeadroom, 0, 99, 0, 4)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertThat(indicesToMarkReadOnly.get(), contains("test_1"));
    assertNull(indicesToRelease.get());

    // both nodes comfortably below the high watermark => the blocked index (test_2) is released
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 150, 10000, 10, 100)));
    builder.put("node2", diskUsage("node2", totalBytes, randomBytes(testMaxHeadroom, 150, 10000, 10, 100)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertNull(indicesToMarkReadOnly.get());
    assertThat(indicesToRelease.get(), contains("test_2"));

    // node2 usage missing: test_1 still gets flagged, but nothing is released without full info
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 0, 99, 0, 4)));
    monitor.onNewInfo(clusterInfo(builder));
    assertThat(indicesToMarkReadOnly.get(), contains("test_1"));
    assertNull(indicesToRelease.get());

    // node1 between high watermark and flood stage => no flag; node3 (no shards) is irrelevant
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 100, 149, 5, 9)));
    builder.put("node2", diskUsage("node2", totalBytes, randomBytes(testMaxHeadroom, 100, 10000, 5, 100)));
    if (randomBoolean()) {
        builder.put("node3", diskUsage("node3", totalBytes, randomBytes(testMaxHeadroom, 0, 10000, 0, 100)));
    }
    monitor.onNewInfo(clusterInfo(builder));
    assertNull(indicesToMarkReadOnly.get());
    assertNull(indicesToRelease.get());

    // node2 usage missing and node1 healthy => neither flag nor release
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 100, 10000, 5, 100)));
    if (randomBoolean()) {
        builder.put("node3", diskUsage("node3", totalBytes, randomBytes(testMaxHeadroom, 0, 10000, 0, 100)));
    }
    monitor.onNewInfo(clusterInfo(builder));
    assertNull(indicesToMarkReadOnly.get());
    assertNull(indicesToRelease.get());

    // node2 usage missing but node1 over flood stage => test_1 is still flagged
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", diskUsage("node1", totalBytes, randomBytes(testMaxHeadroom, 0, 99, 0, 4)));
    if (randomBoolean()) {
        builder.put("node3", diskUsage("node3", totalBytes, randomBytes(testMaxHeadroom, 0, 10000, 0, 100)));
    }
    monitor.onNewInfo(clusterInfo(builder));
    assertThat(indicesToMarkReadOnly.get(), contains("test_1"));
    assertNull(indicesToRelease.get());
}

/**
 * Creates a {@code DiskThresholdMonitor} over the given cluster state whose read-only/release
 * decisions are captured into the provided references (each may be set exactly once per cycle)
 * instead of being applied to the cluster.
 */
private DiskThresholdMonitor createCapturingMonitor(ClusterState state, AtomicReference<Set<String>> indicesToMarkReadOnly, AtomicReference<Set<String>> indicesToRelease) {
    return new DiskThresholdMonitor(Settings.EMPTY, () -> state, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, () -> 0L, (reason, priority, listener) -> {
        // reroutes triggered by threshold decisions must run at HIGH priority
        assertNotNull(listener);
        assertThat(priority, equalTo(Priority.HIGH));
        listener.onResponse(null);
    }) {

        @Override
        protected void updateIndicesReadOnly(Set<String> indicesToUpdate, Releasable onCompletion, boolean readOnly) {
            if (readOnly) {
                assertTrue(indicesToMarkReadOnly.compareAndSet(null, indicesToUpdate));
            } else {
                assertTrue(indicesToRelease.compareAndSet(null, indicesToUpdate));
            }
            onCompletion.close();
        }
    };
}

/**
 * Random byte count: a GB-scale value when exercising max-headroom settings, otherwise a raw
 * byte value against the 100-byte test disk. Draws exactly one random value either way.
 */
private long randomBytes(boolean testMaxHeadroom, int minGb, int maxGb, int minBytes, int maxBytes) {
    return testMaxHeadroom ? ByteSizeValue.ofGb(between(minGb, maxGb)).getBytes() : between(minBytes, maxBytes);
}

/** Disk usage entry for the single data path ("/foo/bar") used throughout this test. */
private static DiskUsage diskUsage(String node, long totalBytes, long freeBytes) {
    return new DiskUsage(node, node, "/foo/bar", totalBytes, freeBytes);
}
196579.351278elasticsearch
/**
 * Builds the header row for the ML anomaly-detector jobs cat endpoint.
 * <p>
 * Column order here defines the response column order, and the alias strings are part
 * of the public REST interface — do not reorder or rename casually. Cells built with
 * {@code builder(desc, false)} are hidden by default and only shown when requested.
 */
protected Table getTableWithHeader(RestRequest request) {
    Table table = new Table();
    table.startHeaders();
    // Job identity / lifecycle columns (shown by default unless built with `false`).
    table.addCell("id", TableColumnAttributeBuilder.builder("the job_id").build());
    table.addCell("state", TableColumnAttributeBuilder.builder("the job state").setAliases("s").setTextAlignment(TableColumnAttributeBuilder.TextAlign.RIGHT).build());
    table.addCell("opened_time", TableColumnAttributeBuilder.builder("the amount of time the job has been opened", false).setAliases("ot").build());
    table.addCell("assignment_explanation", TableColumnAttributeBuilder.builder("why the job is or is not assigned to a node", false).setAliases("ae").build());
    // Data counters (data.* columns).
    table.addCell("data.processed_records", TableColumnAttributeBuilder.builder("number of processed records").setAliases("dpr", "dataProcessedRecords").build());
    table.addCell("data.processed_fields", TableColumnAttributeBuilder.builder("number of processed fields", false).setAliases("dpf", "dataProcessedFields").build());
    table.addCell("data.input_bytes", TableColumnAttributeBuilder.builder("total input bytes", false).setAliases("dib", "dataInputBytes").build());
    table.addCell("data.input_records", TableColumnAttributeBuilder.builder("total record count", false).setAliases("dir", "dataInputRecords").build());
    table.addCell("data.input_fields", TableColumnAttributeBuilder.builder("total field count", false).setAliases("dif", "dataInputFields").build());
    table.addCell("data.invalid_dates", TableColumnAttributeBuilder.builder("number of records with invalid dates", false).setAliases("did", "dataInvalidDates").build());
    table.addCell("data.missing_fields", TableColumnAttributeBuilder.builder("number of records with missing fields", false).setAliases("dmf", "dataMissingFields").build());
    table.addCell("data.out_of_order_timestamps", TableColumnAttributeBuilder.builder("number of records handled out of order", false).setAliases("doot", "dataOutOfOrderTimestamps").build());
    table.addCell("data.empty_buckets", TableColumnAttributeBuilder.builder("number of empty buckets", false).setAliases("deb", "dataEmptyBuckets").build());
    table.addCell("data.sparse_buckets", TableColumnAttributeBuilder.builder("number of sparse buckets", false).setAliases("dsb", "dataSparseBuckets").build());
    table.addCell("data.buckets", TableColumnAttributeBuilder.builder("total bucket count", false).setAliases("db", "dataBuckets").build());
    table.addCell("data.earliest_record", TableColumnAttributeBuilder.builder("earliest record time", false).setAliases("der", "dataEarliestRecord").build());
    table.addCell("data.latest_record", TableColumnAttributeBuilder.builder("latest record time", false).setAliases("dlr", "dataLatestRecord").build());
    table.addCell("data.last", TableColumnAttributeBuilder.builder("last time data was seen", false).setAliases("dl", "dataLast").build());
    table.addCell("data.last_empty_bucket", TableColumnAttributeBuilder.builder("last time an empty bucket occurred", false).setAliases("dleb", "dataLastEmptyBucket").build());
    table.addCell("data.last_sparse_bucket", TableColumnAttributeBuilder.builder("last time a sparse bucket occurred", false).setAliases("dlsb", "dataLastSparseBucket").build());
    // Model size/memory/categorization stats (model.* columns).
    table.addCell("model.bytes", TableColumnAttributeBuilder.builder("model size").setAliases("mb", "modelBytes").build());
    table.addCell("model.memory_status", TableColumnAttributeBuilder.builder("current memory status").setAliases("mms", "modelMemoryStatus").setTextAlignment(TableColumnAttributeBuilder.TextAlign.RIGHT).build());
    table.addCell("model.bytes_exceeded", TableColumnAttributeBuilder.builder("how much the model has exceeded the limit", false).setAliases("mbe", "modelBytesExceeded").build());
    table.addCell("model.memory_limit", TableColumnAttributeBuilder.builder("model memory limit", false).setAliases("mml", "modelMemoryLimit").build());
    table.addCell("model.by_fields", TableColumnAttributeBuilder.builder("count of 'by' fields", false).setAliases("mbf", "modelByFields").build());
    table.addCell("model.over_fields", TableColumnAttributeBuilder.builder("count of 'over' fields", false).setAliases("mof", "modelOverFields").build());
    table.addCell("model.partition_fields", TableColumnAttributeBuilder.builder("count of 'partition' fields", false).setAliases("mpf", "modelPartitionFields").build());
    table.addCell("model.bucket_allocation_failures", TableColumnAttributeBuilder.builder("number of bucket allocation failures", false).setAliases("mbaf", "modelBucketAllocationFailures").build());
    table.addCell("model.categorization_status", TableColumnAttributeBuilder.builder("current categorization status", false).setAliases("mcs", "modelCategorizationStatus").setTextAlignment(TableColumnAttributeBuilder.TextAlign.RIGHT).build());
    table.addCell("model.categorized_doc_count", TableColumnAttributeBuilder.builder("count of categorized documents", false).setAliases("mcdc", "modelCategorizedDocCount").build());
    table.addCell("model.total_category_count", TableColumnAttributeBuilder.builder("count of categories", false).setAliases("mtcc", "modelTotalCategoryCount").build());
    table.addCell("model.frequent_category_count", TableColumnAttributeBuilder.builder("count of frequent categories", false).setAliases("mfcc", "modelFrequentCategoryCount").build());
    table.addCell("model.rare_category_count", TableColumnAttributeBuilder.builder("count of rare categories", false).setAliases("mrcc", "modelRareCategoryCount").build());
    table.addCell("model.dead_category_count", TableColumnAttributeBuilder.builder("count of dead categories", false).setAliases("mdcc", "modelDeadCategoryCount").build());
    // NOTE(review): short alias "mfcc" below duplicates the one used for
    // model.frequent_category_count above — likely unintended; confirm which column
    // "mfcc" should resolve to before changing either (aliases are public REST surface).
    table.addCell("model.failed_category_count", TableColumnAttributeBuilder.builder("count of failed categories", false).setAliases("mfcc", "modelFailedCategoryCount").build());
    table.addCell("model.log_time", TableColumnAttributeBuilder.builder("when the model stats were gathered", false).setAliases("mlt", "modelLogTime").build());
    table.addCell("model.timestamp", TableColumnAttributeBuilder.builder("the time of the last record when the model stats were gathered", false).setAliases("mt", "modelTimestamp").build());
    // Forecast statistics (forecasts.* columns); some names come from ForecastStats.Fields.
    table.addCell("forecasts." + ForecastStats.Fields.TOTAL, TableColumnAttributeBuilder.builder("total number of forecasts").setAliases("ft", "forecastsTotal").build());
    table.addCell("forecasts.memory.min", TableColumnAttributeBuilder.builder("minimum memory used by forecasts", false).setAliases("fmmin", "forecastsMemoryMin").build());
    table.addCell("forecasts.memory.max", TableColumnAttributeBuilder.builder("maximum memory used by forecasts", false).setAliases("fmmax", "forecastsMemoryMax").build());
    table.addCell("forecasts.memory.avg", TableColumnAttributeBuilder.builder("average memory used by forecasts", false).setAliases("fmavg", "forecastsMemoryAvg").build());
    table.addCell("forecasts.memory.total", TableColumnAttributeBuilder.builder("total memory used by all forecasts", false).setAliases("fmt", "forecastsMemoryTotal").build());
    table.addCell("forecasts." + ForecastStats.Fields.RECORDS + ".min", TableColumnAttributeBuilder.builder("minimum record count for forecasts", false).setAliases("frmin", "forecastsRecordsMin").build());
    table.addCell("forecasts." + ForecastStats.Fields.RECORDS + ".max", TableColumnAttributeBuilder.builder("maximum record count for forecasts", false).setAliases("frmax", "forecastsRecordsMax").build());
    table.addCell("forecasts." + ForecastStats.Fields.RECORDS + ".avg", TableColumnAttributeBuilder.builder("average record count for forecasts", false).setAliases("fravg", "forecastsRecordsAvg").build());
    table.addCell("forecasts." + ForecastStats.Fields.RECORDS + ".total", TableColumnAttributeBuilder.builder("total record count for all forecasts", false).setAliases("frt", "forecastsRecordsTotal").build());
    table.addCell("forecasts.time.min", TableColumnAttributeBuilder.builder("minimum runtime for forecasts", false).setAliases("ftmin", "forecastsTimeMin").build());
    table.addCell("forecasts.time.max", TableColumnAttributeBuilder.builder("maximum run time for forecasts", false).setAliases("ftmax", "forecastsTimeMax").build());
    table.addCell("forecasts.time.avg", TableColumnAttributeBuilder.builder("average runtime for all forecasts (milliseconds)", false).setAliases("ftavg", "forecastsTimeAvg").build());
    table.addCell("forecasts.time.total", TableColumnAttributeBuilder.builder("total runtime for all forecasts", false).setAliases("ftt", "forecastsTimeTotal").build());
    // Assigned-node identification (node.* columns).
    table.addCell("node.id", TableColumnAttributeBuilder.builder("id of the assigned node", false).setAliases("ni", "nodeId").build());
    table.addCell("node.name", TableColumnAttributeBuilder.builder("name of the assigned node", false).setAliases("nn", "nodeName").build());
    table.addCell("node.ephemeral_id", TableColumnAttributeBuilder.builder("ephemeral id of the assigned node", false).setAliases("ne", "nodeEphemeralId").build());
    table.addCell("node.address", TableColumnAttributeBuilder.builder("network address of the assigned node", false).setAliases("na", "nodeAddress").build());
    // Bucket-processing timing stats (buckets.* columns).
    table.addCell("buckets.count", TableColumnAttributeBuilder.builder("bucket count").setAliases("bc", "bucketsCount").build());
    table.addCell("buckets.time.total", TableColumnAttributeBuilder.builder("total bucket processing time", false).setAliases("btt", "bucketsTimeTotal").build());
    table.addCell("buckets.time.min", TableColumnAttributeBuilder.builder("minimum bucket processing time", false).setAliases("btmin", "bucketsTimeMin").build());
    table.addCell("buckets.time.max", TableColumnAttributeBuilder.builder("maximum bucket processing time", false).setAliases("btmax", "bucketsTimeMax").build());
    table.addCell("buckets.time.exp_avg", TableColumnAttributeBuilder.builder("exponential average bucket processing time (milliseconds)", false).setAliases("btea", "bucketsTimeExpAvg").build());
    table.addCell("buckets.time.exp_avg_hour", TableColumnAttributeBuilder.builder("exponential average bucket processing time by hour (milliseconds)", false).setAliases("bteah", "bucketsTimeExpAvgHour").build());
    table.endHeaders();
    return table;
}
195923.6514239elasticsearch
/**
 * Integration test: a searchable-snapshot index whose @timestamp range lies outside the
 * query range should be skipped by the can-match phase without contacting its data node.
 * <p>
 * Flow: create two indices (one inside, one outside the search range), snapshot and
 * delete the out-of-range one, then mount it while its data node is blocked so no shard
 * can report a timestamp range. Searches are exercised in three phases:
 * (1) while recovery is blocked (timestamp range unknown — nothing can be skipped),
 * (2) after recovery completes (range becomes known), and
 * (3) after stopping the snapshot's data node (shards unassigned but range still known,
 * so they can be skipped purely from cluster metadata).
 */
public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange() throws Exception {
    internalCluster().startMasterOnlyNode();
    internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
    final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode();
    final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode();
    final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot);
    // Index that will be snapshotted, deleted, and re-mounted; its docs (if timestamped)
    // fall OUTSIDE the queried date range.
    final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3);
    createIndexWithTimestamp(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.EMPTY);
    // Regular index pinned to its own node, with docs INSIDE the queried date range.
    final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3);
    createIndexWithTimestamp(indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder().put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex).build());
    final int totalShards = indexOutsideSearchRangeShardCount + indexWithinSearchRangeShardCount;
    // Randomly index with or without timestamps so the recovered range is either a real
    // interval or IndexLongFieldRange.EMPTY (checked near the end).
    final boolean indexDataWithTimestamp = randomBoolean();
    final int numberOfDocsInIndexOutsideSearchRange = between(350, 1000);
    if (indexDataWithTimestamp) {
        indexDocumentsWithTimestampWithinDate(indexOutsideSearchRange, numberOfDocsInIndexOutsideSearchRange, TIMESTAMP_TEMPLATE_OUTSIDE_RANGE);
    } else {
        indexRandomDocs(indexOutsideSearchRange, numberOfDocsInIndexOutsideSearchRange);
    }
    int numDocsWithinRange = between(100, 1000);
    indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, numDocsWithinRange, TIMESTAMP_TEMPLATE_WITHIN_RANGE);
    // Snapshot the out-of-range index, delete the original, then mount the snapshot
    // while the target data node is blocked so recovery (and range discovery) stalls.
    final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createRepository(repositoryName, "mock");
    final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexOutsideSearchRange)).snapshotId();
    assertAcked(indicesAdmin().prepareDelete(indexOutsideSearchRange));
    final String searchableSnapshotIndexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot);
    Settings restoredIndexSettings = Settings.builder().put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot).build();
    final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest(searchableSnapshotIndexOutsideSearchRange, repositoryName, snapshotId.getName(), indexOutsideSearchRange, restoredIndexSettings, Strings.EMPTY_ARRAY, false, randomFrom(MountSearchableSnapshotRequest.Storage.values()));
    client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet();
    // Recovery is blocked: no shard has reported, so the timestamp range is NO_SHARDS
    // and no timestamp field type is registered yet.
    final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange);
    assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS));
    DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex());
    assertThat(timestampFieldType, nullValue());
    final boolean includeIndexCoveringSearchRangeInSearchRequest = randomBoolean();
    List<String> indicesToSearch = new ArrayList<>();
    if (includeIndexCoveringSearchRangeInSearchRequest) {
        indicesToSearch.add(indexWithinSearchRange);
    }
    indicesToSearch.add(searchableSnapshotIndexOutsideSearchRange);
    RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME).from("2020-11-28T00:00:00.000000000Z", true).to("2020-11-29T00:00:00.000000000Z");
    SearchRequest request = new SearchRequest().indices(indicesToSearch.toArray(new String[0])).source(new SearchSourceBuilder().query(rangeQuery));
    // Phase 1: with recovery still blocked, the snapshot shards fail rather than skip.
    if (includeIndexCoveringSearchRangeInSearchRequest) {
        assertResponse(client().search(request), searchResponse -> {
            assertThat(searchResponse.getSuccessfulShards(), equalTo(indexWithinSearchRangeShardCount));
            assertThat(searchResponse.getFailedShards(), equalTo(indexOutsideSearchRangeShardCount));
            assertThat(searchResponse.getSkippedShards(), equalTo(0));
            assertThat(searchResponse.getTotalShards(), equalTo(totalShards));
        });
    } else {
        // Only the blocked index is targeted, so the whole search fails.
        expectThrows(SearchPhaseExecutionException.class, () -> client().search(request).actionGet());
    }
    {
        boolean allowPartialSearchResults = includeIndexCoveringSearchRangeInSearchRequest;
        SearchShardsRequest searchShardsRequest = new SearchShardsRequest(indicesToSearch.toArray(new String[0]), SearchRequest.DEFAULT_INDICES_OPTIONS, rangeQuery, null, null, allowPartialSearchResults, null);
        if (includeIndexCoveringSearchRangeInSearchRequest) {
            SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
            assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards));
            // Partition groups into [skipped, notSkipped]; with an unknown timestamp
            // range nothing may be skipped.
            List<List<SearchShardsGroup>> partitionedBySkipped = searchShardsResponse.getGroups().stream().collect(Collectors.teeing(Collectors.filtering(g -> g.skipped(), Collectors.toList()), Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), List::of));
            List<SearchShardsGroup> skipped = partitionedBySkipped.get(0);
            List<SearchShardsGroup> notSkipped = partitionedBySkipped.get(1);
            assertThat(skipped.size(), equalTo(0));
            assertThat(notSkipped.size(), equalTo(totalShards));
        } else {
            SearchShardsResponse searchShardsResponse = null;
            try {
                searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
            } catch (SearchPhaseExecutionException e) {
                // NOTE(review): exception deliberately ignored — either outcome (failure
                // or a response with no skipped shards) is acceptable here. Consider
                // renaming the variable to `ignored` to make the intent explicit.
            }
            if (searchShardsResponse != null) {
                for (SearchShardsGroup group : searchShardsResponse.getGroups()) {
                    assertFalse("no shard should be marked as skipped", group.skipped());
                }
            }
        }
    }
    // Phase 2: unblock and let recovery finish; the timestamp range becomes known.
    unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot);
    waitUntilRecoveryIsDone(searchableSnapshotIndexOutsideSearchRange);
    ensureGreen(searchableSnapshotIndexOutsideSearchRange);
    final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange);
    final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange();
    final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex());
    assertThat(dateFieldType, notNullValue());
    final DateFieldMapper.Resolution resolution = dateFieldType.resolution();
    assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true));
    if (indexDataWithTimestamp) {
        assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY)));
        assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z"))));
        assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z"))));
    } else {
        assertThat(updatedTimestampMillisRange, sameInstance(IndexLongFieldRange.EMPTY));
    }
    // Phase 3: stop the snapshot's node so its shards are unassigned; skipping must now
    // happen from cluster-state metadata alone, without querying any node.
    internalCluster().stopNode(dataNodeHoldingSearchableSnapshot);
    waitUntilAllShardsAreUnassigned(updatedIndexMetadata.getIndex());
    if (includeIndexCoveringSearchRangeInSearchRequest) {
        assertResponse(client().search(request), newSearchResponse -> {
            assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount));
            assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards));
            assertThat(newSearchResponse.getFailedShards(), equalTo(0));
            assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards));
            assertThat(newSearchResponse.getHits().getTotalHits().value, equalTo((long) numDocsWithinRange));
        });
        {
            boolean allowPartialSearchResults = true;
            SearchShardsRequest searchShardsRequest = new SearchShardsRequest(indicesToSearch.toArray(new String[0]), SearchRequest.DEFAULT_INDICES_OPTIONS, rangeQuery, null, null, allowPartialSearchResults, null);
            SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
            assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards));
            List<List<SearchShardsGroup>> partitionedBySkipped = searchShardsResponse.getGroups().stream().collect(Collectors.teeing(Collectors.filtering(g -> g.skipped(), Collectors.toList()), Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), List::of));
            List<SearchShardsGroup> skipped = partitionedBySkipped.get(0);
            List<SearchShardsGroup> notSkipped = partitionedBySkipped.get(1);
            assertThat(skipped.size(), equalTo(indexOutsideSearchRangeShardCount));
            assertThat(notSkipped.size(), equalTo(totalShards - indexOutsideSearchRangeShardCount));
        }
    } else {
        // Only the unassigned snapshot index is targeted. With a single shard the
        // coordinator must still query at least one shard, so the search fails; with
        // several shards all but one can be skipped and exactly one fails.
        if (indexOutsideSearchRangeShardCount == 1) {
            expectThrows(SearchPhaseExecutionException.class, () -> client().search(request).actionGet());
            {
                boolean allowPartialSearchResults = false;
                SearchShardsRequest searchShardsRequest = new SearchShardsRequest(indicesToSearch.toArray(new String[0]), SearchRequest.DEFAULT_INDICES_OPTIONS, rangeQuery, null, null, allowPartialSearchResults, null);
                SearchShardsResponse searchShardsResponse = null;
                try {
                    searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
                } catch (SearchPhaseExecutionException e) {
                    // NOTE(review): exception deliberately ignored — see the matching
                    // best-effort catch earlier in this test.
                }
                if (searchShardsResponse != null) {
                    for (SearchShardsGroup group : searchShardsResponse.getGroups()) {
                        assertFalse("no shard should be marked as skipped", group.skipped());
                    }
                }
            }
        } else {
            assertResponse(client().search(request), newSearchResponse -> {
                assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1));
                assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1));
                assertThat(newSearchResponse.getFailedShards(), equalTo(1));
                assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount));
            });
            {
                boolean allowPartialSearchResults = true;
                SearchShardsRequest searchShardsRequest = new SearchShardsRequest(indicesToSearch.toArray(new String[0]), SearchRequest.DEFAULT_INDICES_OPTIONS, rangeQuery, null, null, allowPartialSearchResults, null);
                SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
                assertThat(searchShardsResponse.getGroups().size(), equalTo(indexOutsideSearchRangeShardCount));
                List<List<SearchShardsGroup>> partitionedBySkipped = searchShardsResponse.getGroups().stream().collect(Collectors.teeing(Collectors.filtering(g -> g.skipped(), Collectors.toList()), Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), List::of));
                List<SearchShardsGroup> skipped = partitionedBySkipped.get(0);
                List<SearchShardsGroup> notSkipped = partitionedBySkipped.get(1);
                assertThat(skipped.size(), equalTo(indexOutsideSearchRangeShardCount));
                // NOTE(review): this expression is trivially zero — likely intended as
                // "group count minus skipped count"; confirm against the SearchShards
                // semantics (unlike the search above, can-match here may skip ALL shards).
                assertThat(notSkipped.size(), equalTo(indexOutsideSearchRangeShardCount - indexOutsideSearchRangeShardCount));
            }
        }
    }
}
195273.8227213gwt
/**
 * Generates the concrete {@code LocaleInfoImpl} implementation for the current compile
 * locale.
 * <p>
 * Two classes are emitted: a locale-independent {@code *_shared} superclass holding the
 * available-locale list, native display names, and the RTL flag; and a per-locale (or
 * runtime-selection) subclass providing the locale name, query-param/cookie hooks, and
 * the date-time/number constants lookups.
 *
 * @param logger   logger for reporting generation problems
 * @param context  generator context giving access to the type oracle, property oracle,
 *                 and source writers
 * @param typeName fully-qualified name of the requested type; must be
 *                 {@code LocaleInfoImpl}
 * @return the fully-qualified name of the generated per-locale subclass
 * @throws UnableToCompleteException if the type cannot be resolved or the display-name
 *                                   property files cannot be read
 */
public final String generate(TreeLogger logger, final GeneratorContext context, String typeName) throws UnableToCompleteException {
    TypeOracle typeOracle = context.getTypeOracle();
    PropertyOracle propertyOracle = context.getPropertyOracle();
    LocaleUtils localeUtils = LocaleUtils.getInstance(logger, propertyOracle, context);
    JClassType targetClass;
    try {
        targetClass = typeOracle.getType(typeName);
    } catch (NotFoundException e) {
        logger.log(TreeLogger.ERROR, "No such type " + typeName, e);
        throw new UnableToCompleteException();
    }
    assert (LocaleInfoImpl.class.getName().equals(targetClass.getQualifiedSourceName()));
    String packageName = targetClass.getPackage().getName();
    String superClassName = targetClass.getName().replace('.', '_') + "_shared";
    Set<GwtLocale> localeSet = localeUtils.getAllLocales();
    GwtLocaleImpl[] allLocales = localeSet.toArray(new GwtLocaleImpl[localeSet.size()]);
    // Sort so the generated locale list and display-name tables are deterministic.
    Arrays.sort(allLocales);
    // tryCreate returns null when the shared superclass has already been generated.
    PrintWriter pw = context.tryCreate(logger, packageName, superClassName);
    if (pw != null) {
        // Display names come from three property files with increasing precedence:
        // generated CLDR data < manual additions < explicit overrides.
        LocalizedProperties displayNames = new LocalizedProperties();
        LocalizedProperties displayNamesManual = new LocalizedProperties();
        LocalizedProperties displayNamesOverride = new LocalizedProperties();
        try {
            InputStream str = ResourceLocatorImpl.tryFindResourceAsStream(logger, context.getResourcesOracle(), GENERATED_LOCALE_NATIVE_DISPLAY_NAMES);
            if (str != null) {
                displayNames.load(str, "UTF-8");
            }
            str = ResourceLocatorImpl.tryFindResourceAsStream(logger, context.getResourcesOracle(), MANUAL_LOCALE_NATIVE_DISPLAY_NAMES);
            if (str != null) {
                displayNamesManual.load(str, "UTF-8");
            }
            str = ResourceLocatorImpl.tryFindResourceAsStream(logger, context.getResourcesOracle(), OVERRIDE_LOCALE_NATIVE_DISPLAY_NAMES);
            if (str != null) {
                displayNamesOverride.load(str, "UTF-8");
            }
        } catch (UnsupportedEncodingException e) {
            logger.log(TreeLogger.ERROR, "UTF-8 encoding is not defined", e);
            throw new UnableToCompleteException();
        } catch (IOException e) {
            logger.log(TreeLogger.ERROR, "Exception reading locale display names", e);
            throw new UnableToCompleteException();
        }
        ClassSourceFileComposerFactory factory = new ClassSourceFileComposerFactory(packageName, superClassName);
        factory.setSuperclass(targetClass.getQualifiedSourceName());
        factory.addImport(GWT.class.getCanonicalName());
        factory.addImport(JavaScriptObject.class.getCanonicalName());
        factory.addImport(HashMap.class.getCanonicalName());
        SourceWriter writer = factory.createSourceWriter(context, pw);
        writer.println("private static native String getLocaleNativeDisplayName(");
        writer.println("    JavaScriptObject nativeDisplayNamesNative,String localeName) /*-{");
        writer.println("  return nativeDisplayNamesNative[localeName];");
        writer.println("}-*/;");
        writer.println();
        writer.println("HashMap<String,String> nativeDisplayNamesJava;");
        writer.println("private JavaScriptObject nativeDisplayNamesNative;");
        writer.println();
        writer.println("@Override");
        writer.println("public String[] getAvailableLocaleNames() {");
        writer.println("  return new String[] {");
        boolean hasAnyRtl = false;
        for (GwtLocaleImpl possibleLocale : allLocales) {
            // Escape any double quotes so the locale name is a valid Java string
            // literal. BUG FIX: the previous replaceAll("\"", "\\\"") was a no-op —
            // replaceAll() treats its replacement as a regex replacement string, in
            // which the two characters \" collapse back to a bare quote. The literal
            // replace() below actually emits a backslash before each quote.
            writer.println("    \"" + possibleLocale.toString().replace("\"", "\\\"") + "\",");
            if (RTL_LOCALES.contains(possibleLocale.getCanonicalForm().getLanguage())) {
                hasAnyRtl = true;
            }
        }
        writer.println("  };");
        writer.println("}");
        writer.println();
        writer.println("@Override");
        writer.println("public String getLocaleNativeDisplayName(String localeName) {");
        writer.println("  if (GWT.isScript()) {");
        writer.println("    if (nativeDisplayNamesNative == null) {");
        writer.println("      nativeDisplayNamesNative = loadNativeDisplayNamesNative();");
        writer.println("    }");
        writer.println("    return getLocaleNativeDisplayName(nativeDisplayNamesNative, localeName);");
        writer.println("  } else {");
        writer.println("    if (nativeDisplayNamesJava == null) {");
        writer.println("      nativeDisplayNamesJava = new HashMap<String, String>();");
        {
            // Dev-mode table: a Java HashMap populated with the effective display name
            // for each locale (override > manual > generated).
            for (GwtLocaleImpl possibleLocale : allLocales) {
                String localeName = possibleLocale.toString();
                String displayName = displayNamesOverride.getProperty(localeName);
                if (displayName == null) {
                    displayName = displayNamesManual.getProperty(localeName);
                }
                if (displayName == null) {
                    displayName = displayNames.getProperty(localeName);
                }
                if (displayName != null && displayName.length() != 0) {
                    writer.println("      nativeDisplayNamesJava.put(" + CodeGenUtils.asStringLiteral(localeName) + ", " + CodeGenUtils.asStringLiteral(displayName) + ");");
                }
            }
        }
        writer.println("    }");
        writer.println("    return nativeDisplayNamesJava.get(localeName);");
        writer.println("  }");
        writer.println("}");
        writer.println();
        writer.println("@Override");
        writer.println("public boolean hasAnyRTL() {");
        writer.println("  return " + hasAnyRtl + ";");
        writer.println("}");
        writer.println();
        writer.println("private native JavaScriptObject loadNativeDisplayNamesNative() /*-{");
        writer.println("  return {");
        {
            // Production-mode table: the same mapping emitted as a JS object literal.
            boolean needComma = false;
            for (GwtLocaleImpl possibleLocale : allLocales) {
                String localeName = possibleLocale.toString();
                String displayName = displayNamesOverride.getProperty(localeName);
                if (displayName == null) {
                    displayName = displayNamesManual.getProperty(localeName);
                }
                if (displayName == null) {
                    displayName = displayNames.getProperty(localeName);
                }
                if (displayName != null && displayName.length() != 0) {
                    if (needComma) {
                        writer.println(",");
                    }
                    writer.print("    " + CodeGenUtils.asStringLiteral(localeName) + ": " + CodeGenUtils.asStringLiteral(displayName));
                    needComma = true;
                }
            }
            if (needComma) {
                writer.println();
            }
        }
        writer.println("  };");
        writer.println("}-*/;");
        writer.commit(logger);
    }
    // Second artifact: the per-compile-locale subclass. When runtime locales are
    // configured it becomes a "_runtimeSelection" variant that defers to the runtime
    // locale when one is set.
    GwtLocale locale = localeUtils.getCompileLocale();
    String className = targetClass.getName().replace('.', '_') + "_" + locale.getAsString();
    Set<GwtLocale> runtimeLocales = localeUtils.getRuntimeLocales();
    if (!runtimeLocales.isEmpty()) {
        className += "_runtimeSelection";
    }
    pw = context.tryCreate(logger, packageName, className);
    if (pw != null) {
        ClassSourceFileComposerFactory factory = new ClassSourceFileComposerFactory(packageName, className);
        factory.setSuperclass(superClassName);
        factory.addImport("com.google.gwt.core.client.GWT");
        factory.addImport("com.google.gwt.i18n.client.LocaleInfo");
        factory.addImport("com.google.gwt.i18n.client.constants.NumberConstants");
        factory.addImport("com.google.gwt.i18n.client.constants.NumberConstantsImpl");
        factory.addImport("com.google.gwt.i18n.client.DateTimeFormatInfo");
        factory.addImport("com.google.gwt.i18n.client.impl.cldr.DateTimeFormatInfoImpl");
        SourceWriter writer = factory.createSourceWriter(context, pw);
        writer.println("@Override");
        writer.println("public String getLocaleName() {");
        if (runtimeLocales.isEmpty()) {
            writer.println("  return \"" + locale + "\";");
        } else {
            writer.println("  String rtLocale = getRuntimeLocale();");
            writer.println("  return rtLocale != null ? rtLocale : \"" + locale + "\";");
        }
        writer.println("}");
        writer.println();
        String queryParam = localeUtils.getQueryParam();
        if (queryParam != null) {
            writer.println("@Override");
            writer.println("public String getLocaleQueryParam() {");
            writer.println("  return " + CodeGenUtils.asStringLiteral(queryParam) + ";");
            writer.println("}");
            writer.println();
        }
        String cookie = localeUtils.getCookie();
        if (cookie != null) {
            writer.println("@Override");
            writer.println("public String getLocaleCookieName() {");
            writer.println("  return " + CodeGenUtils.asStringLiteral(cookie) + ";");
            writer.println("}");
            writer.println();
        }
        writer.println("@Override");
        writer.println("public DateTimeFormatInfo getDateTimeFormatInfo() {");
        LocalizableGenerator localizableGenerator = new LocalizableGenerator();
        // Cache generator results so the two constants lookups share work.
        GeneratorContext subContext = new CachedGeneratorContext(context);
        generateConstantsLookup(logger, subContext, writer, localizableGenerator, runtimeLocales, localeUtils, locale, "com.google.gwt.i18n.client.impl.cldr.DateTimeFormatInfoImpl");
        writer.println("}");
        writer.println();
        writer.println("@Override");
        writer.println("public NumberConstants getNumberConstants() {");
        generateConstantsLookup(logger, subContext, writer, localizableGenerator, runtimeLocales, localeUtils, locale, "com.google.gwt.i18n.client.constants.NumberConstantsImpl");
        writer.println("}");
        writer.commit(logger);
    }
    return packageName + "." + className;
}
196930.6825194hadoop
/**
 * Dump summary information and per-block indexes of a TFile to the given stream.
 * <p>
 * Output has three sections: a key/value property table (versions, sizes,
 * compression ratios), a data-block table (offset, length, raw size, record
 * count and a sampled end key per block), and a meta-block table.
 *
 * @param file path of the TFile to inspect.
 * @param out  stream to print the report to.
 * @param conf configuration used to resolve the file system and codecs.
 * @throws IOException if the file cannot be opened or read.
 */
public static void dumpInfo(String file, PrintStream out, Configuration conf) throws IOException {
    // Max number of key bytes sampled for the "End-Key" column.
    final int maxKeySampleLen = 16;
    Path path = new Path(file);
    FileSystem fs = path.getFileSystem(conf);
    long length = fs.getFileStatus(path).getLen();
    FSDataInputStream fsdis = fs.open(path);
    TFile.Reader reader = new TFile.Reader(fsdis, length, conf);
    try {
        // LinkedHashMap so properties print in insertion order.
        LinkedHashMap<String, String> properties = new LinkedHashMap<String, String>();
        int blockCnt = reader.readerBCF.getBlockCount();
        int metaBlkCnt = reader.readerBCF.metaIndex.index.size();
        properties.put("BCFile Version", reader.readerBCF.version.toString());
        properties.put("TFile Version", reader.tfileMeta.version.toString());
        properties.put("File Length", Long.toString(length));
        properties.put("Data Compression", reader.readerBCF.getDefaultCompressionName());
        properties.put("Record Count", Long.toString(reader.getEntryCount()));
        properties.put("Sorted", Boolean.toString(reader.isSorted()));
        if (reader.isSorted()) {
            properties.put("Comparator", reader.getComparatorName());
        }
        properties.put("Data Block Count", Integer.toString(blockCnt));
        long dataSize = 0, dataSizeUncompressed = 0;
        if (blockCnt > 0) {
            for (int i = 0; i < blockCnt; ++i) {
                BlockRegion region = reader.readerBCF.dataIndex.getBlockRegionList().get(i);
                dataSize += region.getCompressedSize();
                dataSizeUncompressed += region.getRawSize();
            }
            properties.put("Data Block Bytes", Long.toString(dataSize));
            // Compression ratio is only meaningful when a codec is in use.
            if (!reader.readerBCF.getDefaultCompressionName().equals("none")) {
                properties.put("Data Block Uncompressed Bytes", Long.toString(dataSizeUncompressed));
                properties.put("Data Block Compression Ratio", String.format("1:%.1f", (double) dataSizeUncompressed / dataSize));
            }
        }
        properties.put("Meta Block Count", Integer.toString(metaBlkCnt));
        long metaSize = 0, metaSizeUncompressed = 0;
        if (metaBlkCnt > 0) {
            Collection<MetaIndexEntry> metaBlks = reader.readerBCF.metaIndex.index.values();
            boolean calculateCompression = false;
            for (Iterator<MetaIndexEntry> it = metaBlks.iterator(); it.hasNext(); ) {
                MetaIndexEntry e = it.next();
                metaSize += e.getRegion().getCompressedSize();
                metaSizeUncompressed += e.getRegion().getRawSize();
                // Report a ratio if any meta block is actually compressed.
                if (e.getCompressionAlgorithm() != Compression.Algorithm.NONE) {
                    calculateCompression = true;
                }
            }
            properties.put("Meta Block Bytes", Long.toString(metaSize));
            if (calculateCompression) {
                properties.put("Meta Block Uncompressed Bytes", Long.toString(metaSizeUncompressed));
                properties.put("Meta Block Compression Ratio", String.format("1:%.1f", (double) metaSizeUncompressed / metaSize));
            }
        }
        // NOTE(review): if metaSize is 0 this prints "1:Infinity"; kept as-is
        // to preserve the existing report format.
        properties.put("Meta-Data Size Ratio", String.format("1:%.1f", (double) dataSize / metaSize));
        long leftOverBytes = length - dataSize - metaSize;
        // Fixed overhead: two magic markers, a 64-bit offset, and the version record.
        long miscSize = BCFile.Magic.size() * 2 + Long.SIZE / Byte.SIZE + Version.size();
        long metaIndexSize = leftOverBytes - miscSize;
        properties.put("Meta Block Index Bytes", Long.toString(metaIndexSize));
        properties.put("Headers Etc Bytes", Long.toString(miscSize));
        // Align the property table on the longest key.
        int maxKeyLength = 0;
        Set<Map.Entry<String, String>> entrySet = properties.entrySet();
        for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it.hasNext(); ) {
            Map.Entry<String, String> e = it.next();
            if (e.getKey().length() > maxKeyLength) {
                maxKeyLength = e.getKey().length();
            }
        }
        for (Iterator<Map.Entry<String, String>> it = entrySet.iterator(); it.hasNext(); ) {
            Map.Entry<String, String> e = it.next();
            out.printf("%s : %s%n", Align.format(e.getKey(), maxKeyLength, Align.LEFT), e.getValue());
        }
        out.println();
        reader.checkTFileDataIndex();
        if (blockCnt > 0) {
            // Data-block table: compute column widths from the largest possible values.
            String blkID = "Data-Block";
            int blkIDWidth = Align.calculateWidth(blkID, blockCnt);
            int blkIDWidth2 = Align.calculateWidth("", blockCnt);
            String offset = "Offset";
            int offsetWidth = Align.calculateWidth(offset, length);
            String blkLen = "Length";
            int blkLenWidth = Align.calculateWidth(blkLen, dataSize / blockCnt * 10);
            String rawSize = "Raw-Size";
            int rawSizeWidth = Align.calculateWidth(rawSize, dataSizeUncompressed / blockCnt * 10);
            String records = "Records";
            int recordsWidth = Align.calculateWidth(records, reader.getEntryCount() / blockCnt * 10);
            String endKey = "End-Key";
            int endKeyWidth = Math.max(endKey.length(), maxKeySampleLen * 2 + 5);
            out.printf("%s %s %s %s %s %s%n", Align.format(blkID, blkIDWidth, Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER), Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(rawSize, rawSizeWidth, Align.CENTER), Align.format(records, recordsWidth, Align.CENTER), Align.format(endKey, endKeyWidth, Align.LEFT));
            for (int i = 0; i < blockCnt; ++i) {
                BlockRegion region = reader.readerBCF.dataIndex.getBlockRegionList().get(i);
                TFileIndexEntry indexEntry = reader.tfileIndex.getEntry(i);
                out.printf("%s %s %s %s %s ", Align.format(Align.format(i, blkIDWidth2, Align.ZERO_PADDED), blkIDWidth, Align.LEFT), Align.format(region.getOffset(), offsetWidth, Align.LEFT), Align.format(region.getCompressedSize(), blkLenWidth, Align.LEFT), Align.format(region.getRawSize(), rawSizeWidth, Align.LEFT), Align.format(indexEntry.kvEntries, recordsWidth, Align.LEFT));
                byte[] key = indexEntry.key;
                // Show the key as text only if the sampled prefix is printable ASCII
                // (tab allowed, other control chars and DEL force hex output).
                boolean asAscii = true;
                int sampleLen = Math.min(maxKeySampleLen, key.length);
                for (int j = 0; j < sampleLen; ++j) {
                    byte b = key[j];
                    if ((b < 32 && b != 9) || (b == 127)) {
                        asAscii = false;
                    }
                }
                if (!asAscii) {
                    out.print("0X");
                    for (int j = 0; j < sampleLen; ++j) {
                        // BUG FIX: previously indexed with the block counter `i`,
                        // dumping the wrong byte (or overrunning short keys).
                        byte b = key[j];
                        out.printf("%X", b);
                    }
                } else {
                    out.print(new String(key, 0, sampleLen, StandardCharsets.UTF_8));
                }
                if (sampleLen < key.length) {
                    out.print("...");
                }
                out.println();
            }
        }
        out.println();
        if (metaBlkCnt > 0) {
            // Meta-block table: name column width grows to fit the longest block name.
            String name = "Meta-Block";
            int maxNameLen = 0;
            Set<Map.Entry<String, MetaIndexEntry>> metaBlkEntrySet = reader.readerBCF.metaIndex.index.entrySet();
            for (Iterator<Map.Entry<String, MetaIndexEntry>> it = metaBlkEntrySet.iterator(); it.hasNext(); ) {
                Map.Entry<String, MetaIndexEntry> e = it.next();
                if (e.getKey().length() > maxNameLen) {
                    maxNameLen = e.getKey().length();
                }
            }
            int nameWidth = Math.max(name.length(), maxNameLen);
            String offset = "Offset";
            int offsetWidth = Align.calculateWidth(offset, length);
            String blkLen = "Length";
            int blkLenWidth = Align.calculateWidth(blkLen, metaSize / metaBlkCnt * 10);
            String rawSize = "Raw-Size";
            int rawSizeWidth = Align.calculateWidth(rawSize, metaSizeUncompressed / metaBlkCnt * 10);
            String compression = "Compression";
            int compressionWidth = compression.length();
            out.printf("%s %s %s %s %s%n", Align.format(name, nameWidth, Align.CENTER), Align.format(offset, offsetWidth, Align.CENTER), Align.format(blkLen, blkLenWidth, Align.CENTER), Align.format(rawSize, rawSizeWidth, Align.CENTER), Align.format(compression, compressionWidth, Align.LEFT));
            for (Iterator<Map.Entry<String, MetaIndexEntry>> it = metaBlkEntrySet.iterator(); it.hasNext(); ) {
                Map.Entry<String, MetaIndexEntry> e = it.next();
                String blkName = e.getValue().getMetaName();
                BlockRegion region = e.getValue().getRegion();
                String blkCompression = e.getValue().getCompressionAlgorithm().getName();
                out.printf("%s %s %s %s %s%n", Align.format(blkName, nameWidth, Align.LEFT), Align.format(region.getOffset(), offsetWidth, Align.LEFT), Align.format(region.getCompressedSize(), blkLenWidth, Align.LEFT), Align.format(region.getRawSize(), rawSizeWidth, Align.LEFT), Align.format(blkCompression, compressionWidth, Align.LEFT));
            }
        }
    } finally {
        // Close reader and stream even on failure; logs (not throws) close errors.
        IOUtils.cleanupWithLogger(LOG, reader, fsdis);
    }
}
193735.6737199hadoop
/**
 * Convert a YARN application {@link TimelineEntity} into an
 * {@link ApplicationReport}.
 * <p>
 * Pulls user/queue/name/type and other attributes from the entity's info map,
 * node-label expressions from its configs, resource-seconds aggregates from
 * its metrics, and lifecycle timestamps/state transitions from its events.
 * Missing pieces fall back to the defaults below (e.g. state ACCEPTED,
 * final status UNDEFINED).
 *
 * @param entity timeline entity describing the application.
 * @return the reconstructed application report.
 */
public static ApplicationReport convertToApplicationReport(TimelineEntity entity) {
    String user = null;
    String queue = null;
    String name = null;
    String type = null;
    boolean unmanagedApplication = false;
    long createdTime = 0;
    long launchTime = 0;
    long finishedTime = 0;
    float progress = 0.0f;
    int applicationPriority = 0;
    ApplicationAttemptId latestApplicationAttemptId = null;
    String diagnosticsInfo = null;
    FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
    YarnApplicationState state = YarnApplicationState.ACCEPTED;
    ApplicationResourceUsageReport appResources = null;
    Set<String> appTags = null;
    String appNodeLabelExpression = null;
    String amNodeLabelExpression = null;
    Map<String, Object> entityInfo = entity.getInfo();
    if (entityInfo != null) {
        if (entityInfo.containsKey(ApplicationMetricsConstants.USER_ENTITY_INFO)) {
            user = entityInfo.get(ApplicationMetricsConstants.USER_ENTITY_INFO).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
            queue = entityInfo.get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.NAME_ENTITY_INFO)) {
            name = entityInfo.get(ApplicationMetricsConstants.NAME_ENTITY_INFO).toString();
        }
        // (A duplicated TYPE_ENTITY_INFO block was removed here; one lookup suffices.)
        if (entityInfo.containsKey(ApplicationMetricsConstants.TYPE_ENTITY_INFO)) {
            type = entityInfo.get(ApplicationMetricsConstants.TYPE_ENTITY_INFO).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO)) {
            unmanagedApplication = Boolean.parseBoolean(entityInfo.get(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO).toString());
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO)) {
            applicationPriority = Integer.parseInt(entityInfo.get(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO).toString());
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
            // Tags key present => report an empty (non-null) tag set even if the
            // stored value is null or not a collection.
            appTags = new HashSet<>();
            Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
            if (obj instanceof Collection<?>) {
                for (Object o : (Collection<?>) obj) {
                    if (o != null) {
                        appTags.add(o.toString());
                    }
                }
            }
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO)) {
            latestApplicationAttemptId = ApplicationAttemptId.fromString(entityInfo.get(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO).toString());
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) {
            diagnosticsInfo = entityInfo.get(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO).toString();
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO)) {
            finalStatus = FinalApplicationStatus.valueOf(entityInfo.get(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO).toString());
        }
        if (entityInfo.containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) {
            state = YarnApplicationState.valueOf(entityInfo.get(ApplicationMetricsConstants.STATE_EVENT_INFO).toString());
        }
    }
    Map<String, String> configs = entity.getConfigs();
    if (configs.containsKey(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION)) {
        appNodeLabelExpression = configs.get(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION);
    }
    if (configs.containsKey(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION)) {
        amNodeLabelExpression = configs.get(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION);
    }
    Set<TimelineMetric> metrics = entity.getMetrics();
    if (metrics != null) {
        long vcoreSeconds = 0;
        long memorySeconds = 0;
        long preemptedVcoreSeconds = 0;
        long preemptedMemorySeconds = 0;
        for (TimelineMetric metric : metrics) {
            switch(metric.getId()) {
                case ApplicationMetricsConstants.APP_CPU_METRICS:
                    vcoreSeconds = getAverageValue(metric.getValues().values());
                    break;
                case ApplicationMetricsConstants.APP_MEM_METRICS:
                    memorySeconds = getAverageValue(metric.getValues().values());
                    break;
                case ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS:
                    // BUG FIX: this case previously assigned preemptedVcoreSeconds,
                    // leaving preempted memory-seconds always 0 and clobbering the
                    // preempted vcore-seconds value.
                    preemptedMemorySeconds = getAverageValue(metric.getValues().values());
                    break;
                case ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS:
                    preemptedVcoreSeconds = getAverageValue(metric.getValues().values());
                    break;
                default:
                    break;
            }
        }
        Map<String, Long> resourceSecondsMap = new HashMap<>();
        Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
        resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
        resourceSecondsMap.put(ResourceInformation.VCORES.getName(), vcoreSeconds);
        preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), preemptedMemorySeconds);
        preemptedResourceSecondsMap.put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds);
        appResources = ApplicationResourceUsageReport.newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0, preemptedResourceSecondsMap);
    }
    NavigableSet<TimelineEvent> events = entity.getEvents();
    long updatedTimeStamp = 0L;
    if (events != null) {
        for (TimelineEvent event : events) {
            if (event.getId().equals(ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
                createdTime = event.getTimestamp();
            } else if (event.getId().equals(ApplicationMetricsConstants.LAUNCHED_EVENT_TYPE)) {
                launchTime = event.getTimestamp();
            } else if (event.getId().equals(ApplicationMetricsConstants.UPDATED_EVENT_TYPE)) {
                // Track only the most recent update event.
                if (event.getTimestamp() > updatedTimeStamp) {
                    updatedTimeStamp = event.getTimestamp();
                }
            } else if (event.getId().equals(ApplicationMetricsConstants.STATE_UPDATED_EVENT_TYPE)) {
                Map<String, Object> eventInfo = event.getInfo();
                if (eventInfo == null) {
                    continue;
                }
                if (eventInfo.containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) {
                    // Only let a state-updated event advance us out of the default
                    // ACCEPTED state; an explicit state from entityInfo wins.
                    if (state == YarnApplicationState.ACCEPTED) {
                        state = YarnApplicationState.valueOf(eventInfo.get(ApplicationMetricsConstants.STATE_EVENT_INFO).toString());
                    }
                }
            } else if (event.getId().equals(ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
                // A finished event overrides state and pins progress to 100%.
                progress = 1.0F;
                state = YarnApplicationState.FINISHED;
                finishedTime = event.getTimestamp();
            }
        }
    }
    return ApplicationReport.newInstance(ApplicationId.fromString(entity.getId()), latestApplicationAttemptId, user, queue, name, null, -1, null, state, diagnosticsInfo, null, createdTime, launchTime, finishedTime, finalStatus, appResources, null, progress, type, null, appTags, unmanagedApplication, Priority.newInstance(applicationPriority), appNodeLabelExpression, amNodeLabelExpression);
}
197536.181281kafka
/**
 * Verifies the reconciliation flow for a classic-protocol member joining a
 * consumer group with the cooperative protocol: revoked partitions are given
 * up first (UNREVOKED_PARTITIONS), then the member waits for partitions still
 * owned by another member (UNRELEASED_PARTITIONS), and finally converges to
 * STABLE once the other member releases them.
 */
public void testReconciliationInJoiningConsumerGroupWithCooperativeProtocol() throws Exception {
    String groupId = "group-id";
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    Uuid zarTopicId = Uuid.randomUuid();
    String zarTopicName = "zar";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Initial state at epoch 10: member1 (classic protocol) owns foo-0 and bar-0,
    // member2 (consumer protocol) owns foo-1; both subscribe to foo and bar.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 2).addTopic(barTopicId, barTopicName, 1).addTopic(zarTopicId, zarTopicName, 1).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, mkMapOfPartitionRacks(1)));
        }
    }).withMember(new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)))))).build()).withMember(new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 1))).build()).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 1))).withAssignmentEpoch(10)).build();
    ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId);
    // Prevent a spurious metadata refresh from interfering with the test.
    group.setMetadataRefreshDeadline(Long.MAX_VALUE, 11);
    // Target assignment for the next epoch: member1 -> foo-0, foo-1, zar-0;
    // member2 -> bar-0 (so foo-1 must be released by member2 first).
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(zarTopicId, 0))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(barTopicId, 0))));
        }
    }));
    // --- Join 1: member1 widens its subscription to include zar, still owning
    // foo-0 and bar-0. bar-0 now belongs to member2's target, so it must be revoked.
    JoinGroupRequestData request1 = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId(groupId).withMemberId(memberId1).withSessionTimeoutMs(5000).withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)))).build();
    GroupMetadataManagerTestContext.JoinResult joinResult1 = context.sendClassicGroupJoin(request1);
    // Member stays at epoch 10 in UNREVOKED_PARTITIONS with bar-0 pending revocation.
    ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1).setMemberEpoch(10).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setState(MemberState.UNREVOKED_PARTITIONS).setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName, zarTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0))).setPartitionsPendingRevocation(mkAssignment(mkTopicAssignment(barTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(request1.sessionTimeoutMs()).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)))))).build();
    List<CoordinatorRecord> expectedRecords1 = Arrays.asList(CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, mkMapOfPartitionRacks(1)));
            put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1, mkMapOfPartitionRacks(1)));
        }
    }), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 11), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(zarTopicId, 0))), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, mkAssignment(mkTopicAssignment(barTopicId, 0))), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 11), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember1));
    assertEquals(expectedRecords1.size(), joinResult1.records.size());
    // Target-assignment records (indices 3-4) may appear in either member order.
    assertRecordsEquals(expectedRecords1.subList(0, 3), joinResult1.records.subList(0, 3));
    assertUnorderedListEquals(expectedRecords1.subList(3, 5), joinResult1.records.subList(3, 5));
    assertRecordsEquals(expectedRecords1.subList(5, 7), joinResult1.records.subList(5, 7));
    assertEquals(expectedMember1.state(), group.getOrMaybeCreateMember(memberId1, false).state());
    joinResult1.appendFuture.complete(null);
    JoinGroupResponseData joinResponse1 = joinResult1.joinFuture.get();
    // Generation stays at 10 because the member has not completed revocation yet.
    assertEquals(new JoinGroupResponseData().setMemberId(memberId1).setGenerationId(10).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setProtocolName("range"), joinResponse1);
    context.assertSessionTimeout(groupId, memberId1, request1.sessionTimeoutMs());
    context.assertSyncTimeout(groupId, memberId1, request1.rebalanceTimeoutMs());
    // Sync returns only foo-0 — bar-0 has been revoked.
    context.verifyClassicGroupSyncToConsumerGroup(groupId, joinResponse1.memberId(), joinResponse1.generationId(), joinResponse1.protocolName(), joinResponse1.protocolType(), Collections.singletonList(new TopicPartition(fooTopicName, 0)));
    // --- Join 2: member1 rejoins owning only foo-0, confirming the revocation.
    JoinGroupRequestData request2 = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId(groupId).withMemberId(memberId1).withSessionTimeoutMs(5000).withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Collections.singletonList(new TopicPartition(fooTopicName, 0)))).build();
    GroupMetadataManagerTestContext.JoinResult joinResult2 = context.sendClassicGroupJoin(request2);
    // Member advances to epoch 11 but is UNRELEASED_PARTITIONS: foo-1 is still
    // held by member2, so only foo-0 and zar-0 are assigned for now.
    ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(expectedMember1).setMemberEpoch(11).setState(MemberState.UNRELEASED_PARTITIONS).setPartitionsPendingRevocation(Collections.emptyMap()).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(zarTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(request2.sessionTimeoutMs()).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Collections.singletonList(new TopicPartition(fooTopicName, 0)))))).build();
    assertRecordsEquals(Arrays.asList(CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember2), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember2)), joinResult2.records);
    assertEquals(expectedMember2.state(), group.getOrMaybeCreateMember(memberId1, false).state());
    joinResult2.appendFuture.complete(null);
    JoinGroupResponseData joinResponse2 = joinResult2.joinFuture.get();
    assertEquals(new JoinGroupResponseData().setMemberId(memberId1).setGenerationId(11).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setProtocolName("range"), joinResponse2);
    context.assertSessionTimeout(groupId, memberId1, request2.sessionTimeoutMs());
    context.assertSyncTimeout(groupId, memberId1, request2.rebalanceTimeoutMs());
    context.verifyClassicGroupSyncToConsumerGroup(groupId, joinResponse2.memberId(), joinResponse2.generationId(), joinResponse2.protocolName(), joinResponse2.protocolType(), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)));
    // member2 heartbeats at epoch 10 with an empty owned set, releasing foo-1.
    context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(10).setTopicPartitions(Collections.emptyList()));
    // --- Join 3: member1 rejoins owning foo-0 and zar-0; foo-1 is now free,
    // so it receives its full target assignment and becomes STABLE.
    JoinGroupRequestData request3 = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId(groupId).withMemberId(memberId1).withSessionTimeoutMs(5000).withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)))).build();
    GroupMetadataManagerTestContext.JoinResult joinResult3 = context.sendClassicGroupJoin(request3);
    ConsumerGroupMember expectedMember3 = new ConsumerGroupMember.Builder(expectedMember2).setState(MemberState.STABLE).setPreviousMemberEpoch(11).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(zarTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(request3.sessionTimeoutMs()).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)))))).build();
    assertRecordsEquals(Arrays.asList(CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember3), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember3)), joinResult3.records);
    assertEquals(expectedMember3.state(), group.getOrMaybeCreateMember(memberId1, false).state());
    joinResult3.appendFuture.complete(null);
    JoinGroupResponseData joinResponse3 = joinResult3.joinFuture.get();
    assertEquals(new JoinGroupResponseData().setMemberId(memberId1).setGenerationId(11).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setProtocolName("range"), joinResponse3);
    context.assertSessionTimeout(groupId, memberId1, request3.sessionTimeoutMs());
    context.assertSyncTimeout(groupId, memberId1, request3.rebalanceTimeoutMs());
    // Final sync hands the member its complete target assignment.
    context.verifyClassicGroupSyncToConsumerGroup(groupId, joinResponse3.memberId(), joinResponse3.generationId(), joinResponse3.protocolName(), joinResponse3.protocolType(), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(zarTopicName, 0)));
}
195687.94278kafka
public void testRolling() {
    final long startTime = SEGMENT_INTERVAL * 2;
    final long increment = SEGMENT_INTERVAL / 2;
    windowStore.put(0, "zero", startTime);
    assertEquals(Utils.mkSet(segments.segmentName(2)), segmentDirs(baseDir));
    windowStore.put(1, "one", startTime + increment);
    assertEquals(Utils.mkSet(segments.segmentName(2)), segmentDirs(baseDir));
    windowStore.put(2, "two", startTime + increment * 2);
    assertEquals(Utils.mkSet(segments.segmentName(2), segments.segmentName(3)), segmentDirs(baseDir));
    windowStore.put(4, "four", startTime + increment * 4);
    assertEquals(Utils.mkSet(segments.segmentName(2), segments.segmentName(3), segments.segmentName(4)), segmentDirs(baseDir));
    windowStore.put(5, "five", startTime + increment * 5);
    assertEquals(Utils.mkSet(segments.segmentName(2), segments.segmentName(3), segments.segmentName(4)), segmentDirs(baseDir));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE))));
    if (storeType == StoreType.RocksDBWindowStore) {
        assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE))));
    } else {
        assertEquals(new HashSet<>(Collections.singletonList("one")), valuesToSet(windowStore.fetch(1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE))));
    }
    assertEquals(new HashSet<>(Collections.singletonList("two")), valuesToSet(windowStore.fetch(2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("four")), valuesToSet(windowStore.fetch(4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("five")), valuesToSet(windowStore.fetch(5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE))));
    windowStore.put(6, "six", startTime + increment * 6);
    assertEquals(Utils.mkSet(segments.segmentName(3), segments.segmentName(4), segments.segmentName(5)), segmentDirs(baseDir));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE))));
    if (storeType == StoreType.RocksDBWindowStore) {
        assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE))));
    } else {
        assertEquals(new HashSet<>(Collections.singletonList("two")), valuesToSet(windowStore.fetch(2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE))));
    }
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("four")), valuesToSet(windowStore.fetch(4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("five")), valuesToSet(windowStore.fetch(5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("six")), valuesToSet(windowStore.fetch(6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE))));
    windowStore.put(7, "seven", startTime + increment * 7);
    assertEquals(Utils.mkSet(segments.segmentName(3), segments.segmentName(4), segments.segmentName(5)), segmentDirs(baseDir));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("four")), valuesToSet(windowStore.fetch(4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("five")), valuesToSet(windowStore.fetch(5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("six")), valuesToSet(windowStore.fetch(6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("seven")), valuesToSet(windowStore.fetch(7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE))));
    windowStore.put(8, "eight", startTime + increment * 8);
    assertEquals(Utils.mkSet(segments.segmentName(4), segments.segmentName(5), segments.segmentName(6)), segmentDirs(baseDir));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(0, ofEpochMilli(startTime - WINDOW_SIZE), ofEpochMilli(startTime + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(1, ofEpochMilli(startTime + increment - WINDOW_SIZE), ofEpochMilli(startTime + increment + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(2, ofEpochMilli(startTime + increment * 2 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 2 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(3, ofEpochMilli(startTime + increment * 3 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 3 + WINDOW_SIZE))));
    if (storeType == StoreType.RocksDBWindowStore) {
        assertEquals(new HashSet<>(Collections.emptyList()), valuesToSet(windowStore.fetch(4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE))));
    } else {
        assertEquals(new HashSet<>(Collections.singletonList("four")), valuesToSet(windowStore.fetch(4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE))));
    }
    assertEquals(new HashSet<>(Collections.singletonList("five")), valuesToSet(windowStore.fetch(5, ofEpochMilli(startTime + increment * 5 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 5 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("six")), valuesToSet(windowStore.fetch(6, ofEpochMilli(startTime + increment * 6 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 6 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("seven")), valuesToSet(windowStore.fetch(7, ofEpochMilli(startTime + increment * 7 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 7 + WINDOW_SIZE))));
    assertEquals(new HashSet<>(Collections.singletonList("eight")), valuesToSet(windowStore.fetch(8, ofEpochMilli(startTime + increment * 8 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 8 + WINDOW_SIZE))));
    windowStore.flush();
    assertEquals(Utils.mkSet(segments.segmentName(4), segments.segmentName(5), segments.segmentName(6)), segmentDirs(baseDir));
}
191654.6561184spring-framework
/**
 * Lexes the expression text in {@code charsToProcess} from {@code pos} up to
 * {@code max}, appending one {@link Token} per recognized lexeme to
 * {@code this.tokens}, and returns the accumulated token list.
 *
 * <p>Single-character operators are emitted via {@code pushCharToken}; possible
 * two-character operators are probed with {@code isTwoCharToken} and emitted via
 * {@code pushPairToken}. NOTE(review): the push/lex helpers are assumed to advance
 * {@code this.pos} — this loop only advances it explicitly for whitespace and
 * NUL characters; confirm against the helper implementations.
 *
 * @return the list of tokens lexed from the input
 * @throws IllegalStateException if an unsupported character is encountered
 */
public List<Token> process() {
    while (this.pos < this.max) {
        char ch = this.charsToProcess[this.pos];
        // Letters start an identifier; '_' and '$' starts are handled in the switch below.
        if (isAlphabetic(ch)) {
            lexIdentifier();
        } else {
            switch(ch) {
                case '+':
                    // "++" (INC) vs. single '+' (PLUS)
                    if (isTwoCharToken(TokenKind.INC)) {
                        pushPairToken(TokenKind.INC);
                    } else {
                        pushCharToken(TokenKind.PLUS);
                    }
                    break;
                case '_':
                    // Underscore is a legal identifier start.
                    lexIdentifier();
                    break;
                case '-':
                    // "--" (DEC) vs. single '-' (MINUS)
                    if (isTwoCharToken(TokenKind.DEC)) {
                        pushPairToken(TokenKind.DEC);
                    } else {
                        pushCharToken(TokenKind.MINUS);
                    }
                    break;
                case ':':
                    pushCharToken(TokenKind.COLON);
                    break;
                case '.':
                    pushCharToken(TokenKind.DOT);
                    break;
                case ',':
                    pushCharToken(TokenKind.COMMA);
                    break;
                case '*':
                    pushCharToken(TokenKind.STAR);
                    break;
                case '/':
                    pushCharToken(TokenKind.DIV);
                    break;
                case '%':
                    pushCharToken(TokenKind.MOD);
                    break;
                case '(':
                    pushCharToken(TokenKind.LPAREN);
                    break;
                case ')':
                    pushCharToken(TokenKind.RPAREN);
                    break;
                case '[':
                    pushCharToken(TokenKind.LSQUARE);
                    break;
                case '#':
                    pushCharToken(TokenKind.HASH);
                    break;
                case ']':
                    pushCharToken(TokenKind.RSQUARE);
                    break;
                case '{':
                    pushCharToken(TokenKind.LCURLY);
                    break;
                case '}':
                    pushCharToken(TokenKind.RCURLY);
                    break;
                case '@':
                    pushCharToken(TokenKind.BEAN_REF);
                    break;
                case '^':
                    // "^[" (SELECT_FIRST) vs. single '^' (POWER)
                    if (isTwoCharToken(TokenKind.SELECT_FIRST)) {
                        pushPairToken(TokenKind.SELECT_FIRST);
                    } else {
                        pushCharToken(TokenKind.POWER);
                    }
                    break;
                case '!':
                    // Probe order matters: "!=" (NE), then "![" (PROJECT), else '!' (NOT).
                    if (isTwoCharToken(TokenKind.NE)) {
                        pushPairToken(TokenKind.NE);
                    } else if (isTwoCharToken(TokenKind.PROJECT)) {
                        pushPairToken(TokenKind.PROJECT);
                    } else {
                        pushCharToken(TokenKind.NOT);
                    }
                    break;
                case '=':
                    // "==" (EQ) vs. single '=' (ASSIGN)
                    if (isTwoCharToken(TokenKind.EQ)) {
                        pushPairToken(TokenKind.EQ);
                    } else {
                        pushCharToken(TokenKind.ASSIGN);
                    }
                    break;
                case '&':
                    // "&&" (SYMBOLIC_AND) vs. single '&' (FACTORY_BEAN_REF)
                    if (isTwoCharToken(TokenKind.SYMBOLIC_AND)) {
                        pushPairToken(TokenKind.SYMBOLIC_AND);
                    } else {
                        pushCharToken(TokenKind.FACTORY_BEAN_REF);
                    }
                    break;
                case '|':
                    // Lone '|' is invalid: only "||" (SYMBOLIC_OR) is accepted.
                    // raiseParseException is expected to throw, so pushPairToken
                    // is only reached for a valid pair.
                    if (!isTwoCharToken(TokenKind.SYMBOLIC_OR)) {
                        raiseParseException(this.pos, SpelMessage.MISSING_CHARACTER, "|");
                    }
                    pushPairToken(TokenKind.SYMBOLIC_OR);
                    break;
                case '?':
                    // Probe order matters: "?[" (SELECT), "?:" (ELVIS), "?." (SAFE_NAVI), else '?' (QMARK).
                    if (isTwoCharToken(TokenKind.SELECT)) {
                        pushPairToken(TokenKind.SELECT);
                    } else if (isTwoCharToken(TokenKind.ELVIS)) {
                        pushPairToken(TokenKind.ELVIS);
                    } else if (isTwoCharToken(TokenKind.SAFE_NAVI)) {
                        pushPairToken(TokenKind.SAFE_NAVI);
                    } else {
                        pushCharToken(TokenKind.QMARK);
                    }
                    break;
                case '$':
                    // "$[" (SELECT_LAST); otherwise '$' begins an identifier.
                    if (isTwoCharToken(TokenKind.SELECT_LAST)) {
                        pushPairToken(TokenKind.SELECT_LAST);
                    } else {
                        lexIdentifier();
                    }
                    break;
                case '>':
                    // ">=" (GE) vs. single '>' (GT)
                    if (isTwoCharToken(TokenKind.GE)) {
                        pushPairToken(TokenKind.GE);
                    } else {
                        pushCharToken(TokenKind.GT);
                    }
                    break;
                case '<':
                    // "<=" (LE) vs. single '<' (LT)
                    if (isTwoCharToken(TokenKind.LE)) {
                        pushPairToken(TokenKind.LE);
                    } else {
                        pushCharToken(TokenKind.LT);
                    }
                    break;
                case '0':
                case '1':
                case '2':
                case '3':
                case '4':
                case '5':
                case '6':
                case '7':
                case '8':
                case '9':
                    // A leading '0' may introduce a non-decimal literal (e.g. hex); the flag tells the lexer.
                    lexNumericLiteral(ch == '0');
                    break;
                case ' ':
                case '\t':
                case '\r':
                case '\n':
                    // Whitespace produces no token; just advance.
                    this.pos++;
                    break;
                case '\'':
                    lexQuotedStringLiteral();
                    break;
                case '"':
                    lexDoubleQuotedStringLiteral();
                    break;
                case 0:
                    // NUL characters are skipped silently (e.g. padding at end of buffer) — TODO confirm intent.
                    this.pos++;
                    break;
                case '\\':
                    // Backslash escapes are not part of the expression syntax outside string literals.
                    raiseParseException(this.pos, SpelMessage.UNEXPECTED_ESCAPE_CHAR);
                    break;
                default:
                    throw new IllegalStateException("Unsupported character '%s' (%d) encountered at position %d in expression.".formatted(ch, (int) ch, (this.pos + 1)));
            }
        }
    }
    return this.tokens;
}
193286.6242196wildfly
/**
 * Parses an {@code <xa-datasource>} element (datasources schema 4.0) from the
 * stream reader into management operations: one ADD operation for the
 * xa-datasource resource itself, followed by one ADD operation per
 * {@code <xa-datasource-property>} child. All operations are appended to
 * {@code list} only once the matching end tag is seen, so a parse failure
 * adds nothing.
 *
 * @param reader        the StAX reader, positioned on the xa-datasource start element
 * @param list          receives the datasource ADD operation plus property ADD operations
 * @param parentAddress address of the parent resource; the datasource address is
 *                      built by appending {@code XA_DATASOURCE=poolName} to a clone of it
 * @throws XMLStreamException on underlying stream errors
 * @throws ParserException    on an unexpected element/end tag or premature end of document
 * @throws ValidateException  propagated from nested parse helpers
 */
private void parseXADataSource_4_0(XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // Phase 1: consume the element's attributes into the ADD operation.
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final XaDataSource.Attribute attribute = XaDataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // pool-name becomes the last element of the resource address, not an operation parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case CONNECTABLE:
                {
                    final String value = rawAttributeText(reader, CONNECTABLE.getXmlName());
                    if (value != null) {
                        CONNECTABLE.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case MCP:
                {
                    final String value = rawAttributeText(reader, MCP.getXmlName());
                    if (value != null) {
                        MCP.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case ENLISTMENT_TRACE:
                {
                    // NOTE(review): unlike the sibling cases, no null guard here — parseAndSetParameter
                    // may receive null when the attribute is absent; confirm that is intended.
                    final String value = rawAttributeText(reader, ENLISTMENT_TRACE.getXmlName());
                    ENLISTMENT_TRACE.parseAndSetParameter(value, operation, reader);
                    break;
                }
            case TRACKING:
                {
                    final String value = rawAttributeText(reader, TRACKING.getXmlName());
                    if (value != null) {
                        TRACKING.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // statistics-enabled is not part of the XaDataSource.Attribute enum; match it by name.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Phase 2: build the (immutable) resource address from the pool name.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(XA_DATASOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // Property ADD operations are buffered so they land in `list` after the datasource ADD.
    List<ModelNode> xadatasourcePropertiesOperations = new ArrayList<ModelNode>(0);
    // Phase 3: consume child elements until the matching xa-datasource end tag.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.XA_DATASOURCE) {
                        // End of this datasource: commit the buffered operations and return.
                        list.add(operation);
                        list.addAll(xadatasourcePropertiesOperations);
                        return;
                    } else {
                        // End tags of recognized child elements are consumed silently;
                        // anything unknown is an error.
                        if (XaDataSource.Tag.forName(reader.getLocalName()) == XaDataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(XaDataSource.Tag.forName(reader.getLocalName())) {
                        case XA_DATASOURCE_PROPERTY:
                            {
                                // Each property becomes its own ADD op at address <ds>/xa-datasource-properties=<name>.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(XADATASOURCE_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                XADATASOURCE_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                xadatasourcePropertiesOperations.add(configOperation);
                                break;
                            }
                        case XA_DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                XA_DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case XA_POOL:
                            {
                                parseXaPool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_PROPERTY:
                            {
                                String value = rawElementText(reader);
                                URL_PROPERTY.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                // The security element's shape depends on the document namespace:
                                // legacy 4.0 uses the original parser, any later namespace the 5.0 one.
                                switch(Namespace.forUri(reader.getNamespaceURI())) {
                                    case DATASOURCES_4_0:
                                        parseDsSecurity(reader, operation);
                                        break;
                                    default:
                                        parseDsSecurity_5_0(reader, operation);
                                        break;
                                }
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        case RECOVERY:
                            {
                                parseRecovery(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Reached only if the document ends before the xa-datasource end tag.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
193326.1243199wildfly
/**
 * Parses an {@code <xa-datasource>} element (datasources schema 7.0) into
 * management operations: one ADD operation for the xa-datasource resource and
 * one ADD operation per {@code <xa-datasource-property>} child, all appended to
 * {@code list} when the matching end tag is reached.
 *
 * <p>Differs from the 4.0 variant only in the SECURITY namespace dispatch
 * (4.0 / 7.0 / later each get their own security parser) and in using
 * {@code parseValidationSetting_7_0} for VALIDATION.
 *
 * @param reader        the StAX reader, positioned on the xa-datasource start element
 * @param list          receives the datasource ADD operation plus property ADD operations
 * @param parentAddress address of the parent resource; the datasource address is
 *                      built by appending {@code XA_DATASOURCE=poolName} to a clone of it
 * @throws XMLStreamException on underlying stream errors
 * @throws ParserException    on an unexpected element/end tag or premature end of document
 * @throws ValidateException  propagated from nested parse helpers
 */
private void parseXADataSource_7_0(XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // Phase 1: consume the element's attributes into the ADD operation.
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final XaDataSource.Attribute attribute = XaDataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // pool-name becomes the last element of the resource address, not an operation parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case CONNECTABLE:
                {
                    final String value = rawAttributeText(reader, CONNECTABLE.getXmlName());
                    if (value != null) {
                        CONNECTABLE.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case MCP:
                {
                    final String value = rawAttributeText(reader, MCP.getXmlName());
                    if (value != null) {
                        MCP.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case ENLISTMENT_TRACE:
                {
                    // NOTE(review): unlike the sibling cases, no null guard here — parseAndSetParameter
                    // may receive null when the attribute is absent; confirm that is intended.
                    final String value = rawAttributeText(reader, ENLISTMENT_TRACE.getXmlName());
                    ENLISTMENT_TRACE.parseAndSetParameter(value, operation, reader);
                    break;
                }
            case TRACKING:
                {
                    final String value = rawAttributeText(reader, TRACKING.getXmlName());
                    if (value != null) {
                        TRACKING.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // statistics-enabled is not part of the XaDataSource.Attribute enum; match it by name.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Phase 2: build the (immutable) resource address from the pool name.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(XA_DATASOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // Property ADD operations are buffered so they land in `list` after the datasource ADD.
    List<ModelNode> xadatasourcePropertiesOperations = new ArrayList<ModelNode>(0);
    // Phase 3: consume child elements until the matching xa-datasource end tag.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.XA_DATASOURCE) {
                        // End of this datasource: commit the buffered operations and return.
                        list.add(operation);
                        list.addAll(xadatasourcePropertiesOperations);
                        return;
                    } else {
                        // End tags of recognized child elements are consumed silently;
                        // anything unknown is an error.
                        if (XaDataSource.Tag.forName(reader.getLocalName()) == XaDataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(XaDataSource.Tag.forName(reader.getLocalName())) {
                        case XA_DATASOURCE_PROPERTY:
                            {
                                // Each property becomes its own ADD op at address <ds>/xa-datasource-properties=<name>.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(XADATASOURCE_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                XADATASOURCE_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                xadatasourcePropertiesOperations.add(configOperation);
                                break;
                            }
                        case XA_DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                XA_DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case XA_POOL:
                            {
                                parseXaPool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_PROPERTY:
                            {
                                String value = rawElementText(reader);
                                URL_PROPERTY.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                // Security element shape depends on the document namespace:
                                // 4.0 and 7.0 have dedicated parsers; later versions fall through to 7.1.
                                switch(Namespace.forUri(reader.getNamespaceURI())) {
                                    case DATASOURCES_4_0:
                                        parseDsSecurity(reader, operation);
                                        break;
                                    case DATASOURCES_7_0:
                                        parseDsSecurity_5_0(reader, operation);
                                        break;
                                    default:
                                        parseDsSecurity_7_1(reader, operation);
                                        break;
                                }
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                // 7.0 schema uses its own validation parser (cf. parseValidationSettings in 4.0).
                                parseValidationSetting_7_0(reader, operation);
                                break;
                            }
                        case RECOVERY:
                            {
                                parseRecovery(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Reached only if the document ends before the xa-datasource end tag.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
194826.8946170wildfly
/**
 * Marshals the EJB3 subsystem model to XML.
 *
 * <p>The blocks below are emitted in the fixed order required by the subsystem
 * schema, so they must not be reordered. Each block is written only when the
 * corresponding part of the model is defined.
 *
 * @param writer  the stream writer to emit XML to
 * @param context marshalling context supplying the subsystem {@link ModelNode}
 * @throws XMLStreamException on any write failure
 */
protected void writeElements(final XMLExtendedStreamWriter writer, final SubsystemMarshallingContext context) throws XMLStreamException {
    ModelNode model = context.getModelNode();
    // The <session-bean> wrapper element is opened lazily by
    // writeSessionBeanStartElement the first time any of the three bean blocks
    // below runs, and is closed afterwards only if it was actually opened.
    boolean sessionBeanStartWritten = false;
    if (model.hasDefined(EJB3SubsystemModel.DEFAULT_SLSB_INSTANCE_POOL)) {
        sessionBeanStartWritten = writeSessionBeanStartElement(writer, sessionBeanStartWritten);
        writer.writeStartElement(EJB3SubsystemXMLElement.STATELESS.getLocalName());
        this.writeDefaultSLSBPool(writer, model);
        writer.writeEndElement();
    }
    if (model.hasDefined(EJB3SubsystemModel.DEFAULT_STATEFUL_BEAN_ACCESS_TIMEOUT) || model.hasDefined(EJB3SubsystemModel.DEFAULT_SFSB_CACHE) || model.hasDefined(EJB3SubsystemModel.DEFAULT_SFSB_PASSIVATION_DISABLED_CACHE)) {
        sessionBeanStartWritten = writeSessionBeanStartElement(writer, sessionBeanStartWritten);
        writer.writeStartElement(EJB3SubsystemXMLElement.STATEFUL.getLocalName());
        this.writeStatefulBean(writer, model);
        writer.writeEndElement();
    }
    if (model.hasDefined(EJB3SubsystemModel.DEFAULT_SINGLETON_BEAN_ACCESS_TIMEOUT)) {
        sessionBeanStartWritten = writeSessionBeanStartElement(writer, sessionBeanStartWritten);
        writer.writeStartElement(EJB3SubsystemXMLElement.SINGLETON.getLocalName());
        this.writeSingletonBean(writer, model);
        writer.writeEndElement();
    }
    if (sessionBeanStartWritten) {
        // Close the lazily opened <session-bean> wrapper.
        writer.writeEndElement();
    }
    if (model.hasDefined(EJB3SubsystemModel.DEFAULT_MDB_INSTANCE_POOL) || model.hasDefined(EJB3SubsystemModel.DEFAULT_RESOURCE_ADAPTER_NAME) || model.hasDefined(EJB3SubsystemModel.MDB_DELIVERY_GROUP)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.MDB.getLocalName());
        this.writeMDB(writer, model);
        writer.writeEndElement();
    }
    if (model.hasDefined(EJB3SubsystemModel.DEFAULT_ENTITY_BEAN_INSTANCE_POOL) || model.hasDefined(EJB3SubsystemModel.DEFAULT_ENTITY_BEAN_OPTIMISTIC_LOCKING)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.ENTITY_BEAN.getLocalName());
        this.writeEntityBean(writer, model);
        writer.writeEndElement();
    }
    if (model.hasDefined(EJB3SubsystemModel.STRICT_MAX_BEAN_INSTANCE_POOL)) {
        // <pools><bean-instance-pools> wraps the individual pool definitions.
        writer.writeStartElement(EJB3SubsystemXMLElement.POOLS.getLocalName());
        writer.writeStartElement(EJB3SubsystemXMLElement.BEAN_INSTANCE_POOLS.getLocalName());
        this.writeBeanInstancePools(writer, model);
        writer.writeEndElement();
        writer.writeEndElement();
    }
    if (model.hasDefined(EJB3SubsystemModel.CACHE) || model.hasDefined(EJB3SubsystemModel.SIMPLE_CACHE) || model.hasDefined(EJB3SubsystemModel.DISTRIBUTABLE_CACHE)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.CACHES.getLocalName());
        this.writeCaches(writer, model);
        this.writeSimpleCaches(writer, model);
        this.writeDistributableCaches(writer, model);
        writer.writeEndElement();
    }
    if (model.hasDefined(EJB3SubsystemModel.PASSIVATION_STORE) || model.hasDefined(EJB3SubsystemModel.CLUSTER_PASSIVATION_STORE) || model.hasDefined(EJB3SubsystemModel.FILE_PASSIVATION_STORE)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.PASSIVATION_STORES.getLocalName());
        this.writePassivationStores(writer, model);
        this.writeFilePassivationStores(writer, model);
        this.writeClusterPassivationStores(writer, model);
        writer.writeEndElement();
    }
    if (model.hasDefined(SERVICE) && model.get(SERVICE).hasDefined(ASYNC)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.ASYNC.getLocalName());
        writeAsync(writer, model.get(SERVICE, ASYNC));
        writer.writeEndElement();
    }
    if (model.hasDefined(SERVICE) && model.get(SERVICE).hasDefined(TIMER_SERVICE)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.TIMER_SERVICE.getLocalName());
        final ModelNode timerServiceModel = model.get(SERVICE, TIMER_SERVICE);
        this.writeTimerService(writer, timerServiceModel);
        writer.writeEndElement();
    }
    if (model.hasDefined(SERVICE) && model.get(SERVICE).hasDefined(REMOTE)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.REMOTE.getLocalName());
        writeRemote(writer, model.get(SERVICE, REMOTE));
        // Remoting profiles are nested inside <remote>.
        if (model.hasDefined(REMOTING_PROFILE)) {
            writer.writeStartElement(EJB3SubsystemXMLElement.PROFILES.getLocalName());
            writeProfiles(writer, model);
            writer.writeEndElement();
        }
        writer.writeEndElement();
    }
    if (model.hasDefined(THREAD_POOL)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.THREAD_POOLS.getLocalName());
        final ModelNode threadsModel = model.get(THREAD_POOL);
        this.writeThreadPools(writer, threadsModel);
        writer.writeEndElement();
    }
    if (model.hasDefined(SERVICE) && model.get(SERVICE).hasDefined(IIOP)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.IIOP.getLocalName());
        writeIIOP(writer, model.get(SERVICE, IIOP));
        writer.writeEndElement();
    }
    // The remaining simple settings are single empty elements carrying one
    // attribute; writeValueElement factors out that repeated shape.
    if (model.hasDefined(IN_VM_REMOTE_INTERFACE_INVOCATION_PASS_BY_VALUE)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.IN_VM_REMOTE_INTERFACE_INVOCATION, EJB3SubsystemXMLAttribute.PASS_BY_VALUE, model.get(EJB3SubsystemModel.IN_VM_REMOTE_INTERFACE_INVOCATION_PASS_BY_VALUE));
    }
    if (model.hasDefined(DEFAULT_DISTINCT_NAME)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.DEFAULT_DISTINCT_NAME, EJB3SubsystemXMLAttribute.VALUE, model.get(EJB3SubsystemModel.DEFAULT_DISTINCT_NAME));
    }
    if (model.hasDefined(DEFAULT_SECURITY_DOMAIN)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.DEFAULT_SECURITY_DOMAIN, EJB3SubsystemXMLAttribute.VALUE, model.get(DEFAULT_SECURITY_DOMAIN));
    }
    if (model.hasDefined(APPLICATION_SECURITY_DOMAIN)) {
        writer.writeStartElement(EJB3SubsystemXMLElement.APPLICATION_SECURITY_DOMAINS.getLocalName());
        writeApplicationSecurityDomains(writer, model);
        writer.writeEndElement();
    }
    if (model.hasDefined(SERVICE) && model.get(SERVICE).hasDefined(IDENTITY) && model.get(SERVICE, IDENTITY).hasDefined(IdentityResourceDefinition.OUTFLOW_SECURITY_DOMAINS.getName())) {
        writer.writeStartElement(EJB3SubsystemXMLElement.IDENTITY.getLocalName());
        writeAttribute(writer, model.get(SERVICE, IDENTITY), IdentityResourceDefinition.OUTFLOW_SECURITY_DOMAINS, false);
        writer.writeEndElement();
    }
    if (model.hasDefined(DEFAULT_MISSING_METHOD_PERMISSIONS_DENY_ACCESS)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.DEFAULT_MISSING_METHOD_PERMISSIONS_DENY_ACCESS, EJB3SubsystemXMLAttribute.VALUE, model.get(DEFAULT_MISSING_METHOD_PERMISSIONS_DENY_ACCESS));
    }
    if (model.hasDefined(DISABLE_DEFAULT_EJB_PERMISSIONS)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.DISABLE_DEFAULT_EJB_PERMISSIONS, EJB3SubsystemXMLAttribute.VALUE, model.get(DISABLE_DEFAULT_EJB_PERMISSIONS));
    }
    if (model.hasDefined(ENABLE_GRACEFUL_TXN_SHUTDOWN)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.ENABLE_GRACEFUL_TXN_SHUTDOWN, EJB3SubsystemXMLAttribute.VALUE, model.get(EJB3SubsystemModel.ENABLE_GRACEFUL_TXN_SHUTDOWN));
    }
    if (model.hasDefined(STATISTICS_ENABLED)) {
        // Note: <statistics> uses the "enabled" attribute, not "value".
        writeValueElement(writer, EJB3SubsystemXMLElement.STATISTICS, EJB3SubsystemXMLAttribute.ENABLED, model.get(EJB3SubsystemModel.STATISTICS_ENABLED));
    }
    if (model.hasDefined(LOG_SYSTEM_EXCEPTIONS)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.LOG_SYSTEM_EXCEPTIONS, EJB3SubsystemXMLAttribute.VALUE, model.get(EJB3SubsystemModel.LOG_SYSTEM_EXCEPTIONS));
    }
    if (model.hasDefined(ALLOW_EJB_NAME_REGEX)) {
        writeValueElement(writer, EJB3SubsystemXMLElement.ALLOW_EJB_NAME_REGEX, EJB3SubsystemXMLAttribute.VALUE, model.get(EJB3SubsystemModel.ALLOW_EJB_NAME_REGEX));
    }
    // Server- and client-side interceptor lists have identical XML shape.
    if (model.hasDefined(SERVER_INTERCEPTORS)) {
        writeInterceptors(writer, EJB3SubsystemXMLElement.SERVER_INTERCEPTORS, model.get(SERVER_INTERCEPTORS));
    }
    if (model.hasDefined(CLIENT_INTERCEPTORS)) {
        writeInterceptors(writer, EJB3SubsystemXMLElement.CLIENT_INTERCEPTORS, model.get(CLIENT_INTERCEPTORS));
    }
}

/**
 * Writes a single empty element carrying one attribute whose value is
 * {@code value.asString()}.
 *
 * @param writer    the stream writer to emit XML to
 * @param element   the element to write
 * @param attribute the attribute carried by the element
 * @param value     the model node whose string form becomes the attribute value
 * @throws XMLStreamException on any write failure
 */
private static void writeValueElement(final XMLExtendedStreamWriter writer, final EJB3SubsystemXMLElement element, final EJB3SubsystemXMLAttribute attribute, final ModelNode value) throws XMLStreamException {
    writer.writeStartElement(element.getLocalName());
    writer.writeAttribute(attribute.getLocalName(), value.asString());
    writer.writeEndElement();
}

/**
 * Writes a wrapper element containing one {@code <interceptor module=... class=.../>}
 * child per entry of the given list node.
 *
 * @param writer         the stream writer to emit XML to
 * @param wrapperElement the wrapping element (server- or client-interceptors)
 * @param interceptors   list node whose entries each define "module" and "class"
 * @throws XMLStreamException on any write failure
 */
private static void writeInterceptors(final XMLExtendedStreamWriter writer, final EJB3SubsystemXMLElement wrapperElement, final ModelNode interceptors) throws XMLStreamException {
    writer.writeStartElement(wrapperElement.getLocalName());
    for (final ModelNode interceptor : interceptors.asList()) {
        writer.writeStartElement(EJB3SubsystemXMLElement.INTERCEPTOR.getLocalName());
        writer.writeAttribute(EJB3SubsystemXMLAttribute.MODULE.getLocalName(), interceptor.get(EJB3SubsystemXMLAttribute.MODULE.getLocalName()).asString());
        writer.writeAttribute(EJB3SubsystemXMLAttribute.CLASS.getLocalName(), interceptor.get(EJB3SubsystemXMLAttribute.CLASS.getLocalName()).asString());
        writer.writeEndElement();
    }
    writer.writeEndElement();
}
192687.9750182wildfly
/**
 * Parses a single mod_cluster proxy configuration element into an :add operation.
 *
 * <p>Attribute handling is schema-version sensitive. Several cases in the
 * attribute switch deliberately FALL THROUGH when their schema-version guard
 * is not met: each guarded case only breaks if the attribute was consumed, and
 * otherwise control drops into the next case, ultimately reaching the default
 * branch which rejects the attribute. Do not add unconditional breaks there.
 *
 * @param reader the XML stream reader positioned at the proxy element
 * @param list   the operation list to append the created add-operation to
 * @param parent the parent address under which the proxy resource is registered
 * @throws XMLStreamException on any parse failure or unexpected content
 */
private void parseProxy(XMLExtendedStreamReader reader, List<ModelNode> list, PathAddress parent) throws XMLStreamException {
    // Since schema 4.0 proxies are named; older schemas have a single implicit "default" proxy.
    String name = schema.since(ModClusterSubsystemSchema.MODCLUSTER_4_0) ? require(reader, XMLAttribute.NAME) : "default";
    PathAddress address = parent.append(ProxyConfigurationResourceDefinition.pathElement(name));
    ModelNode operation = Util.createAddOperation(address);
    list.add(operation);
    int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        requireNoNamespaceAttribute(reader, i);
        XMLAttribute attribute = XMLAttribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ADVERTISE_SOCKET:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.ADVERTISE_SOCKET);
                    break;
                }
            case PROXY_URL:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.PROXY_URL);
                    break;
                }
            case ADVERTISE:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.ADVERTISE);
                    break;
                }
            case ADVERTISE_SECURITY_KEY:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.ADVERTISE_SECURITY_KEY);
                    break;
                }
            case EXCLUDED_CONTEXTS:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.EXCLUDED_CONTEXTS);
                    break;
                }
            case AUTO_ENABLE_CONTEXTS:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.AUTO_ENABLE_CONTEXTS);
                    break;
                }
            case STOP_CONTEXT_TIMEOUT:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.STOP_CONTEXT_TIMEOUT);
                    break;
                }
            case SOCKET_TIMEOUT:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.SOCKET_TIMEOUT);
                    break;
                }
            case STICKY_SESSION:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.STICKY_SESSION);
                    break;
                }
            case STICKY_SESSION_REMOVE:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.STICKY_SESSION_REMOVE);
                    break;
                }
            case STICKY_SESSION_FORCE:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.STICKY_SESSION_FORCE);
                    break;
                }
            case WORKER_TIMEOUT:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.WORKER_TIMEOUT);
                    break;
                }
            case MAX_ATTEMPTS:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.MAX_ATTEMPTS);
                    break;
                }
            case FLUSH_PACKETS:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.FLUSH_PACKETS);
                    break;
                }
            case FLUSH_WAIT:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.FLUSH_WAIT);
                    break;
                }
            case PING:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.PING);
                    break;
                }
            case SMAX:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.SMAX);
                    break;
                }
            case TTL:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.TTL);
                    break;
                }
            case NODE_TIMEOUT:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.NODE_TIMEOUT);
                    break;
                }
            case BALANCER:
                {
                    readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.BALANCER);
                    break;
                }
            case PROXY_LIST:
                {
                    // Removed in schema 6.0; older schemas accept it but ignore the value.
                    if (this.schema.since(ModClusterSubsystemSchema.MODCLUSTER_6_0)) {
                        throw ParseUtils.unexpectedAttribute(reader, i);
                    }
                    ROOT_LOGGER.ignoredAttribute(attribute.getLocalName(), reader.getLocalName());
                    break;
                }
            case DOMAIN:
                {
                    // Legacy 1.0 name for load-balancing-group; for any newer schema
                    // this intentionally falls through to LOAD_BALANCING_GROUP.
                    if (schema == ModClusterSubsystemSchema.MODCLUSTER_1_0) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.LOAD_BALANCING_GROUP);
                        break;
                    }
                }
            case LOAD_BALANCING_GROUP:
                {
                    // Only valid since 1.1; otherwise falls through toward the default (reject).
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_1_1)) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.LOAD_BALANCING_GROUP);
                        break;
                    }
                }
            case CONNECTOR:
                {
                    // Valid only in schemas [1.1, 4.0); maps onto the LISTENER attribute.
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_1_1) && !schema.since(ModClusterSubsystemSchema.MODCLUSTER_4_0)) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.LISTENER);
                        break;
                    } else {
                        throw unexpectedAttribute(reader, i);
                    }
                }
            case SESSION_DRAINING_STRATEGY:
                {
                    // Only valid since 1.2; otherwise falls through toward the default (reject).
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_1_2)) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.SESSION_DRAINING_STRATEGY);
                        break;
                    }
                }
            case STATUS_INTERVAL:
                {
                    // Only valid since 2.0; otherwise falls through toward the default (reject).
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_2_0)) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.STATUS_INTERVAL);
                        break;
                    }
                }
            case PROXIES:
                {
                    // Only valid since 2.0; otherwise falls through toward the default (reject).
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_2_0)) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.PROXIES);
                        break;
                    }
                }
            case SSL_CONTEXT:
                {
                    // Only valid since 3.0; otherwise falls through toward the default (reject).
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_3_0)) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.SSL_CONTEXT);
                        break;
                    }
                }
            case NAME:
                {
                    // Already consumed above via require(...) since 4.0; pre-4.0 falls through (reject).
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_4_0)) {
                        break;
                    }
                }
            case LISTENER:
                {
                    // Only valid since 4.0; otherwise falls through to the default (reject).
                    if (schema.since(ModClusterSubsystemSchema.MODCLUSTER_4_0)) {
                        readAttribute(reader, i, operation, ProxyConfigurationResourceDefinition.Attribute.LISTENER);
                        break;
                    }
                }
            default:
                {
                    throw unexpectedAttribute(reader, i);
                }
        }
    }
    if (schema == ModClusterSubsystemSchema.MODCLUSTER_1_0) {
        // Schema 1.0 had no listener attribute; it implicitly targeted the "ajp" connector.
        setAttribute(reader, "ajp", operation, ProxyConfigurationResourceDefinition.Attribute.LISTENER);
    }
    // Parse nested content: at most one load provider plus an ignored legacy <ssl> element.
    while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {
        XMLElement element = XMLElement.forName(reader.getLocalName());
        switch(element) {
            case SIMPLE_LOAD_PROVIDER:
                {
                    this.parseSimpleLoadProvider(reader, list, address);
                    break;
                }
            case DYNAMIC_LOAD_PROVIDER:
                {
                    this.parseDynamicLoadProvider(reader, list, address);
                    break;
                }
            case SSL:
                {
                    // Legacy element: logged and skipped, must be empty.
                    ROOT_LOGGER.ignoredElement(element.getLocalName());
                    ParseUtils.requireNoContent(reader);
                    break;
                }
            default:
                {
                    throw unexpectedElement(reader);
                }
        }
    }
}
193292.4874127wildfly
private boolean isAttributeExcluded(PathAddress address, String attrName, ModelNode attrDesc, ModelNode resourceNoDefaults) {
    if (!attrDesc.get(ACCESS_TYPE).isDefined() || !attrDesc.get(ACCESS_TYPE).asString().equalsIgnoreCase("read-write")) {
        return true;
    }
    if (attrDesc.get(STORAGE).isDefined() && !attrDesc.get(STORAGE).asString().equalsIgnoreCase("configuration")) {
        return true;
    }
    if (attrDesc.get(ModelDescriptionConstants.DEPRECATED).isDefined()) {
        return true;
    }
    if ("default-web-module".equals(attrName)) {
        if (address.size() > 1) {
            PathElement subPe = address.getElement(0);
            if ("subsystem".equals(subPe.getKey()) && "web".equals(subPe.getValue()) && "virtual-server".equals(address.getLastElement().getKey())) {
                return true;
            }
        }
    } else if ("policy-modules".equals(attrName) || "login-modules".equals(attrName)) {
        if (address.size() > 2) {
            PathElement subPe = address.getElement(0);
            if ("subsystem".equals(subPe.getKey()) && "security".equals(subPe.getValue()) && "security-domain".equals(address.getElement(1).getKey())) {
                return true;
            }
        }
    } else if ("virtual-nodes".equals(attrName)) {
        if (address.size() == 3) {
            PathElement subPe = address.getElement(0);
            PathElement containerPe = address.getElement(1);
            PathElement distPe = address.getElement(2);
            if ("subsystem".equals(subPe.getKey()) && "infinispan".equals(subPe.getValue()) && "cache-container".equals(containerPe.getKey()) && "distributed-cache".equals(distPe.getKey())) {
                return true;
            }
        }
    } else if (address.size() > 0 && "transactions".equals(address.getLastElement().getValue()) && "subsystem".equals(address.getLastElement().getKey())) {
        if (attrName.contains("jdbc")) {
            return !resourceNoDefaults.hasDefined("use-jdbc-store") || !resourceNoDefaults.get("use-jdbc-store").asBoolean();
        } else if (attrName.contains("journal")) {
            return !resourceNoDefaults.hasDefined("use-journal-store") || !resourceNoDefaults.get("use-journal-store").asBoolean();
        }
    } else if ("security-application".equals(attrName)) {
        if (address.size() == 3) {
            PathElement subPe = address.getElement(0);
            PathElement raPe = address.getElement(1);
            PathElement connPe = address.getElement(2);
            if ("subsystem".equals(subPe.getKey()) && "resource-adapters".equals(subPe.getValue()) && "resource-adapter".equals(raPe.getKey()) && "connection-definitions".equals(connPe.getKey())) {
                return true;
            }
        }
    } else if (attrName.startsWith("wm-security")) {
        if (address.size() == 2) {
            PathElement subPe = address.getElement(0);
            PathElement raPe = address.getElement(1);
            if ("subsystem".equals(subPe.getKey()) && "resource-adapters".equals(subPe.getValue()) && "resource-adapter".equals(raPe.getKey())) {
                return true;
            }
        }
    } else if ("transaction-support".equals(attrName)) {
        if (address.size() == 2) {
            PathElement subPe = address.getElement(0);
            PathElement raPe = address.getElement(1);
            if ("subsystem".equals(subPe.getKey()) && "resource-adapters".equals(subPe.getValue()) && "resource-adapter".equals(raPe.getKey())) {
                return true;
            }
        }
    } else if ("pool-fair".equals(attrName) || "pad-xid".equals(attrName) || "interleaving".equals(attrName) || "no-tx-separate-pool".equals(attrName) || "wrap-xa-resource".equals(attrName)) {
        if (address.size() == 3) {
            PathElement subPe = address.getElement(0);
            PathElement raPe = address.getElement(1);
            PathElement connPe = address.getElement(2);
            if ("subsystem".equals(subPe.getKey()) && "resource-adapters".equals(subPe.getValue()) && "resource-adapter".equals(raPe.getKey()) && "connection-definitions".equals(connPe.getKey())) {
                return true;
            }
        }
    } else if ("fixed-source-port".equals(attrName)) {
        if (address.size() == 2) {
            PathElement socketBindingGroupPe = address.getElement(0);
            PathElement remoteDestPe = address.getElement(1);
            if ("socket-binding-group".equals(socketBindingGroupPe.getKey()) && "remote-destination-outbound-socket-binding".equals(remoteDestPe.getKey())) {
                return true;
            }
        }
    } else if ("console-enabled".equals(attrName)) {
        if (address.size() == 2) {
            PathElement coreServicePe = address.getElement(0);
            PathElement mngmtIfPe = address.getElement(1);
            if ("core-service".equals(coreServicePe.getKey()) && "management".equals(coreServicePe.getValue()) && "management-interface".equals(mngmtIfPe.getKey()) && "http-interface".equals(mngmtIfPe.getValue())) {
                return true;
            }
        }
    } else if ("async-registration".equals(attrName)) {
        if (address.size() > 0) {
            PathElement coreServicePe = address.getElement(0);
            if ("subsystem".equals(coreServicePe.getKey()) && "xts".equals(coreServicePe.getValue())) {
                return true;
            }
        }
    } else if ("path".equals(attrName) && address.size() == 1 && "path".equals(address.getElement(0).getKey())) {
        try {
            return readAttribute(address, "read-only").asBoolean();
        } catch (IOException | MgmtOperationException e) {
            throw new RuntimeException(e);
        }
    }
    return false;
}
206546.01247cassandra
/**
 * Exercises multi-column (tuple) clustering-key relations on a table with
 * three clustering columns: equality, range, mixed range bounds, IN (including
 * empty IN), and each of those combined with reverse ORDER BY, plus
 * multi-partition variants. Expected rows follow clustering order
 * (b, c, d ascending) unless the query reverses it.
 */
public void testMultipleClustering() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a, b, c, d))");
    // Seed partition a=0 with six rows covering b in {0,1}.
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 0, 0);
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 0);
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 0, 1, 1);
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 0, 0);
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 1, 0);
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, 1, 1, 1);
    // Empty IN list yields no rows.
    assertEmpty(execute("SELECT * FROM %s WHERE a = 0 AND (b, c, d) IN ()"));
    // Tuple equality on 1-, 2- and 3-column prefixes; literal tuple vs bound tuple marker.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) = (?)", 0, 1), row(0, 1, 0, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) = ?", 0, tuple(1)), row(0, 1, 0, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) = (?, ?)", 0, 1, 1), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) = ?", 0, tuple(1, 1)), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) = (?, ?, ?)", 0, 1, 1, 1), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) = ?", 0, tuple(1, 1, 1)), row(0, 1, 1, 1));
    // Tuple range relations (strict and inclusive, each prefix length).
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) > (?)", 0, 0), row(0, 1, 0, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) >= (?)", 0, 0), row(0, 0, 0, 0), row(0, 0, 1, 0), row(0, 0, 1, 1), row(0, 1, 0, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) > (?, ?)", 0, 1, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) >= (?, ?)", 0, 1, 0), row(0, 1, 0, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?)", 0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) >= (?, ?, ?)", 0, 1, 1, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) < (?)", 0, 1), row(0, 0, 0, 0), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) <= (?)", 0, 1), row(0, 0, 0, 0), row(0, 0, 1, 0), row(0, 0, 1, 1), row(0, 1, 0, 0), row(0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) < (?, ?)", 0, 0, 1), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) <= (?, ?)", 0, 0, 1), row(0, 0, 0, 0), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) < (?, ?, ?)", 0, 0, 1, 1), row(0, 0, 0, 0), row(0, 0, 1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) <= (?, ?, ?)", 0, 0, 1, 1), row(0, 0, 0, 0), row(0, 0, 1, 0), row(0, 0, 1, 1));
    // Combined lower and upper tuple bounds of differing prefix lengths.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b) < (?)", 0, 0, 1, 0, 1), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND b < ?", 0, 0, 1, 0, 1), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c) < (?, ?)", 0, 0, 1, 1, 1, 1), row(0, 1, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c, d) < (?, ?, ?)", 0, 0, 1, 1, 1, 1, 0), row(0, 1, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > ? AND (b, c, d) < ?", 0, tuple(0, 1, 1), tuple(1, 1, 0)), row(0, 1, 0, 0));
    // Same range relations with fully reversed clustering order.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) > (?) ORDER BY b DESC, c DESC, d DESC", 0, 0), row(0, 1, 1, 1), row(0, 1, 1, 0), row(0, 1, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) >= (?) ORDER BY b DESC, c DESC, d DESC", 0, 0), row(0, 1, 1, 1), row(0, 1, 1, 0), row(0, 1, 0, 0), row(0, 0, 1, 1), row(0, 0, 1, 0), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) > (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 0), row(0, 1, 1, 1), row(0, 1, 1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) >= (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 0), row(0, 1, 1, 1), row(0, 1, 1, 0), row(0, 1, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 1, 0), row(0, 1, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) >= (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 1, 1, 0), row(0, 1, 1, 1), row(0, 1, 1, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) < (?) ORDER BY b DESC, c DESC, d DESC", 0, 1), row(0, 0, 1, 1), row(0, 0, 1, 0), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b) <= (?) ORDER BY b DESC, c DESC, d DESC", 0, 1), row(0, 1, 1, 1), row(0, 1, 1, 0), row(0, 1, 0, 0), row(0, 0, 1, 1), row(0, 0, 1, 0), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) < (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) <= (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1), row(0, 0, 1, 1), row(0, 0, 1, 0), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) < (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1), row(0, 0, 1, 0), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) <= (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1), row(0, 0, 1, 1), row(0, 0, 1, 0), row(0, 0, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b) < (?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 0, 1), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND b < ? ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 0, 1), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c) < (?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1, 1, 1), row(0, 1, 0, 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) > (?, ?, ?) AND (b, c, d) < (?, ?, ?) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1, 1, 1, 1, 0), row(0, 1, 0, 0));
    // Tuple IN relations: literal tuples, bound tuples, bound list of tuples;
    // results come back in clustering order even when the IN list is unordered.
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN ((?, ?, ?), (?, ?, ?))", 0, 0, 1, 0, 0, 1, 1), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN (?, ?)", 0, tuple(0, 1, 0), tuple(0, 1, 1)), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN ?", 0, list(tuple(0, 1, 0), tuple(0, 1, 1))), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c, d) IN (?, ?)", 0, tuple(0, 1, 1), tuple(0, 1, 0)), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? and (b, c) IN ((?, ?))", 0, 0, 1), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a = ? and (b) IN ((?))", 0, 0), row(0, 0, 0, 0), row(0, 0, 1, 0), row(0, 0, 1, 1));
    assertEmpty(execute("SELECT * FROM %s WHERE a = ? and (b) IN ()", 0));
    assertRows(execute("SELECT * FROM %s WHERE a = ? AND (b, c) IN ((?, ?)) ORDER BY b DESC, c DESC, d DESC", 0, 0, 1), row(0, 0, 1, 1), row(0, 0, 1, 0));
    assertEmpty(execute("SELECT * FROM %s WHERE a = ? AND (b, c) IN () ORDER BY b DESC, c DESC, d DESC", 0));
    // Add a second partition (a=1) and check multi-partition IN combinations.
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 0, 0);
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 1, 0);
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, 0, 1, 1);
    assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) AND (b, c, d) IN (?, ?)", 0, 1, tuple(0, 1, 0), tuple(0, 1, 1)), row(0, 0, 1, 0), row(0, 0, 1, 1), row(1, 0, 1, 0), row(1, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a IN ? AND (b, c, d) IN ?", list(0, 1), list(tuple(0, 1, 0), tuple(0, 1, 1))), row(0, 0, 1, 0), row(0, 0, 1, 1), row(1, 0, 1, 0), row(1, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) AND (b, c, d) IN (?, ?)", 1, 0, tuple(0, 1, 1), tuple(0, 1, 0)), row(0, 0, 1, 0), row(0, 0, 1, 1), row(1, 0, 1, 0), row(1, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) and (b, c) IN ((?, ?))", 0, 1, 0, 1), row(0, 0, 1, 0), row(0, 0, 1, 1), row(1, 0, 1, 0), row(1, 0, 1, 1));
    assertRows(execute("SELECT * FROM %s WHERE a IN (?, ?) and (b) IN ((?))", 0, 1, 0), row(0, 0, 0, 0), row(0, 0, 1, 0), row(0, 0, 1, 1), row(1, 0, 0, 0), row(1, 0, 1, 0), row(1, 0, 1, 1));
}
203854.3228199elasticsearch
/**
 * Resolves a Painless function reference (such as {@code type::method}, {@code type::new},
 * or {@code this::localFunction}) against the functional interface {@code targetClass} and
 * returns a {@link FunctionRef} describing both the interface method and the delegate
 * (class, method name, invoke type, method type) that will implement it.
 *
 * <p>Three delegate kinds are supported, selected by {@code typeName}/{@code methodName}:
 * <ul>
 *   <li>{@code typeName == "this"} — a user-defined local function looked up in
 *       {@code functionTable};</li>
 *   <li>{@code methodName == "new"} — a constructor looked up via the Painless lookup;</li>
 *   <li>otherwise — a static or instance method looked up via the Painless lookup.</li>
 * </ul>
 *
 * <p>When {@code location} is non-null, every interface parameter is checked for a legal
 * cast to the corresponding delegate parameter (and the return type likewise), and any
 * {@link IllegalArgumentException} raised during resolution is re-thrown decorated with
 * that location.
 *
 * @param painlessLookup     whitelist lookup used to resolve interfaces, methods and constructors
 * @param functionTable      table of user-defined functions; required (non-null) only for the
 *                           {@code "this"} branch
 * @param location           script location for error decoration and cast checking; may be null
 *                           to skip both
 * @param targetClass        the functional interface being implemented
 * @param typeName           owner type name of the reference, or {@code "this"} for local functions
 * @param methodName         method name of the reference, or {@code "new"} for constructors
 * @param numberOfCaptures   number of captured arguments (0 or 1 for method refs; any
 *                           non-negative count for local functions)
 * @param constants          compile-time constants used to build injected arguments
 * @param needsScriptInstance whether the generated delegate must be invoked on the script
 *                            instance (affects invoke type and factory class type)
 * @return the fully-resolved function reference descriptor
 * @throws IllegalArgumentException if the target is not a functional interface or no matching
 *         delegate can be found (wrapped with {@code location} when available)
 * @throws IllegalStateException on internal invariant violations (unexpected capture counts,
 *         unexpected receiver parameter type)
 */
public static FunctionRef create(PainlessLookup painlessLookup, FunctionTable functionTable, Location location, Class<?> targetClass, String typeName, String methodName, int numberOfCaptures, Map<String, Object> constants, boolean needsScriptInstance) {
    Objects.requireNonNull(painlessLookup);
    Objects.requireNonNull(targetClass);
    Objects.requireNonNull(typeName);
    Objects.requireNonNull(methodName);
    String targetClassName = PainlessLookupUtility.typeToCanonicalTypeName(targetClass);
    PainlessMethod interfaceMethod;
    try {
        // The target must be a whitelisted functional interface; otherwise there is no
        // single abstract method to implement.
        interfaceMethod = painlessLookup.lookupFunctionalInterfacePainlessMethod(targetClass);
        if (interfaceMethod == null) {
            throw new IllegalArgumentException(Strings.format("cannot convert function reference [%s::%s] to a non-functional interface [%s]", typeName, methodName, targetClassName));
        }
        String interfaceMethodName = interfaceMethod.javaMethod().getName();
        // Drop the leading receiver parameter to get the raw SAM signature.
        MethodType interfaceMethodType = interfaceMethod.methodType().dropParameterTypes(0, 1);
        String delegateClassName;
        boolean isDelegateInterface;
        boolean isDelegateAugmented;
        int delegateInvokeType;
        String delegateMethodName;
        MethodType delegateMethodType;
        Object[] delegateInjections;
        Class<?> delegateMethodReturnType;
        List<Class<?>> delegateMethodParameters;
        int interfaceTypeParametersSize = interfaceMethod.typeParameters().size();
        if ("this".equals(typeName)) {
            // Branch 1: reference to a user-defined local function ("this::name").
            Objects.requireNonNull(functionTable);
            if (numberOfCaptures < 0) {
                throw new IllegalStateException("internal error");
            }
            // Local functions are keyed by name + total arity (captures + interface args).
            String localFunctionKey = FunctionTable.buildLocalFunctionKey(methodName, numberOfCaptures + interfaceTypeParametersSize);
            LocalFunction localFunction = functionTable.getFunction(localFunctionKey);
            if (localFunction == null) {
                // A "$" in the key indicates an arity-mangled name, suggesting the user got
                // the argument count wrong rather than the name.
                throw new IllegalArgumentException(Strings.format("function reference [this::%s] matching [%s, %s/%d] not found%s", localFunctionKey, targetClassName, interfaceMethodName, interfaceTypeParametersSize, localFunctionKey.contains("$") ? " due to an incorrect number of arguments" : ""));
            }
            delegateClassName = CLASS_NAME;
            isDelegateInterface = false;
            isDelegateAugmented = false;
            // Instance-bound scripts invoke local functions virtually; otherwise statically.
            delegateInvokeType = needsScriptInstance ? H_INVOKEVIRTUAL : H_INVOKESTATIC;
            delegateMethodName = localFunction.getMangledName();
            delegateMethodType = localFunction.getMethodType();
            delegateInjections = new Object[0];
            delegateMethodReturnType = localFunction.getReturnType();
            delegateMethodParameters = localFunction.getTypeParameters();
        } else if ("new".equals(methodName)) {
            // Branch 2: constructor reference ("Type::new"). Captures are not supported here.
            if (numberOfCaptures != 0) {
                throw new IllegalStateException("internal error");
            }
            PainlessConstructor painlessConstructor = painlessLookup.lookupPainlessConstructor(typeName, interfaceTypeParametersSize);
            if (painlessConstructor == null) {
                throw new IllegalArgumentException(Strings.format("function reference [%s::new/%d] matching [%s, %s/%d] not found", typeName, interfaceTypeParametersSize, targetClassName, interfaceMethodName, interfaceTypeParametersSize));
            }
            delegateClassName = painlessConstructor.javaConstructor().getDeclaringClass().getName();
            isDelegateInterface = false;
            isDelegateAugmented = false;
            delegateInvokeType = H_NEWINVOKESPECIAL;
            delegateMethodName = PainlessLookupUtility.CONSTRUCTOR_NAME;
            delegateMethodType = painlessConstructor.methodType();
            delegateInjections = new Object[0];
            // A constructor "returns" an instance of its declaring class.
            delegateMethodReturnType = painlessConstructor.javaConstructor().getDeclaringClass();
            delegateMethodParameters = painlessConstructor.typeParameters();
        } else {
            // Branch 3: ordinary method reference ("Type::method"), optionally with one
            // captured receiver.
            if (numberOfCaptures != 0 && numberOfCaptures != 1) {
                throw new IllegalStateException("internal error");
            }
            boolean captured = numberOfCaptures == 1;
            // Try a static method first (arity == interface arity) ...
            PainlessMethod painlessMethod = painlessLookup.lookupPainlessMethod(typeName, true, methodName, interfaceTypeParametersSize);
            if (painlessMethod == null) {
                // ... then an instance method. Without a capture, the first interface
                // argument supplies the receiver, so the method arity is one less.
                painlessMethod = painlessLookup.lookupPainlessMethod(typeName, false, methodName, captured ? interfaceTypeParametersSize : interfaceTypeParametersSize - 1);
                if (painlessMethod == null) {
                    throw new IllegalArgumentException(Strings.format("function reference [%s::%s/%d] matching [%s, %s/%d] not found", typeName, methodName, interfaceTypeParametersSize, targetClassName, interfaceMethodName, interfaceTypeParametersSize));
                }
            } else if (captured) {
                // A static method has no receiver to bind the captured variable to.
                throw new IllegalArgumentException(Strings.format("cannot use a static method as a function reference [%s::%s/%d] with a non-static captured variable", typeName, methodName, interfaceTypeParametersSize));
            }
            delegateClassName = painlessMethod.javaMethod().getDeclaringClass().getName();
            isDelegateInterface = painlessMethod.javaMethod().getDeclaringClass().isInterface();
            // Augmented methods live on a helper class distinct from the whitelisted target.
            isDelegateAugmented = painlessMethod.javaMethod().getDeclaringClass() != painlessMethod.targetClass();
            if (Modifier.isStatic(painlessMethod.javaMethod().getModifiers())) {
                delegateInvokeType = H_INVOKESTATIC;
            } else if (isDelegateInterface) {
                delegateInvokeType = H_INVOKEINTERFACE;
            } else {
                delegateInvokeType = H_INVOKEVIRTUAL;
            }
            delegateMethodName = painlessMethod.javaMethod().getName();
            delegateMethodType = painlessMethod.methodType();
            if (delegateInvokeType != H_INVOKESTATIC && painlessMethod.javaMethod().getDeclaringClass() != painlessMethod.methodType().parameterType(0)) {
                // The stored method type may erase the receiver to Object; narrow it back
                // to the declaring class. Anything else is an invariant violation.
                if (painlessMethod.methodType().parameterType(0) != Object.class) {
                    throw new IllegalStateException("internal error");
                }
                delegateMethodType = delegateMethodType.changeParameterType(0, painlessMethod.javaMethod().getDeclaringClass());
            }
            delegateInjections = PainlessLookupUtility.buildInjections(painlessMethod, constants);
            delegateMethodReturnType = painlessMethod.returnType();
            if (delegateMethodType.parameterList().size() > painlessMethod.typeParameters().size()) {
                // Instance methods carry an extra leading receiver parameter in their
                // MethodType; mirror it in the Painless parameter list for cast checking.
                delegateMethodParameters = new ArrayList<>(painlessMethod.typeParameters());
                delegateMethodParameters.add(0, delegateMethodType.parameterType(0));
            } else {
                delegateMethodParameters = painlessMethod.typeParameters();
            }
        }
        if (location != null) {
            // Validate that each interface parameter can be legally cast to the delegate
            // parameter it feeds (offset by the captured arguments), and the return type
            // back to the interface's return type.
            for (int typeParameter = 0; typeParameter < interfaceTypeParametersSize; ++typeParameter) {
                Class<?> from = interfaceMethod.typeParameters().get(typeParameter);
                Class<?> to = delegateMethodParameters.get(numberOfCaptures + typeParameter);
                AnalyzerCaster.getLegalCast(location, from, to, false, true);
            }
            if (interfaceMethod.returnType() != void.class) {
                AnalyzerCaster.getLegalCast(location, delegateMethodReturnType, interfaceMethod.returnType(), false, true);
            }
        }
        // The factory takes only the captured arguments and returns the interface instance;
        // the delegate type is then stripped of those captures.
        MethodType factoryMethodType = MethodType.methodType(targetClass, delegateMethodType.dropParameterTypes(numberOfCaptures, delegateMethodType.parameterCount()));
        delegateMethodType = delegateMethodType.dropParameterTypes(0, numberOfCaptures);
        return new FunctionRef(interfaceMethodName, interfaceMethodType, delegateClassName, isDelegateInterface, isDelegateAugmented, delegateInvokeType, delegateMethodName, delegateMethodType, delegateInjections, factoryMethodType, needsScriptInstance ? WriterConstants.CLASS_TYPE : null);
    } catch (IllegalArgumentException iae) {
        // Decorate lookup failures with the script location when one is available.
        if (location != null) {
            throw location.createError(iae);
        }
        throw iae;
    }
}
207312.141243elasticsearch
/**
 * Exercises Painless casting rules when the source value is statically typed {@code Object}.
 *
 * <p>The pattern throughout: assigning an {@code Object}-typed value to a narrower reference
 * or primitive type without an explicit cast is rejected with a {@link ClassCastException},
 * and an explicit cast succeeds only when the runtime value is exactly the matching boxed or
 * reference type (or null, for reference targets) — there is no numeric conversion through
 * {@code Object}, and primitives can never be cast from {@code Object} at all. Sections below
 * are grouped by the target type of the assignment.
 */
public void testObjectCasts() {
    // Object -> Number / String: explicit cast succeeds (including null); implicit fails.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Number n = o;"));
    exec("Object o = Integer.valueOf(0); Number n = (Number)o;");
    exec("Object o = null; Number n = (Number)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = 'string'; String n = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; String n = o;"));
    exec("Object o = 'string'; String n = (String)o;");
    exec("Object o = null; String n = (String)o;");
    // Object -> primitive boolean: always rejected, even with an explicit cast.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Boolean.valueOf(true); boolean b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; boolean b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Boolean.valueOf(true); boolean b = (boolean)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; boolean b = (boolean)o;"));
    // Object -> primitive byte: always rejected, regardless of the boxed runtime type.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); byte b = (byte)o;"));
    // Object -> primitive short: always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); short b = (short)o;"));
    // Object -> primitive char: always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); char b = (char)o;"));
    // Object -> primitive int: always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf((int)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf((int)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); int b = (int)o;"));
    // Object -> primitive long: always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); long b = (long)o;"));
    // Object -> primitive float: always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); float b = (float)o;"));
    // NOTE(review): these two use Character.valueOf(0) (int argument) while every other char
    // fixture uses Character.valueOf((char)0) — confirm this is intentional.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf(0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf(0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); float b = (float)o;"));
    // Object -> primitive double: always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); double b = (double)o;"));
    // Object -> boxed Boolean: explicit cast of exact type (or null) succeeds.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Boolean.valueOf(true); Boolean b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Boolean b = o;"));
    exec("Object o = Boolean.valueOf(true); Boolean b = (Boolean)o;");
    exec("Object o = null; Boolean b = (Boolean)o;");
    // Object -> boxed Byte: explicit cast succeeds only for Byte/null; other boxed
    // numerics fail even with a cast (no boxed-to-boxed conversion).
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Byte b = o;"));
    exec("Object o = Byte.valueOf((byte)0); Byte b = (Byte)o;");
    exec("Object o = null; Byte b = (Byte)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Byte b = (Byte)o;"));
    // Object -> boxed Short.
    // NOTE(review): Short.valueOf((byte)0) here and Byte.valueOf((short)0) below look like
    // swapped casts relative to the pattern elsewhere in this test — confirm intent.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((byte)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Short b = o;"));
    exec("Object o = Short.valueOf((byte)0); Short b = (Short)o;");
    exec("Object o = null; Short b = (Short)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((short)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((short)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Short b = (Short)o;"));
    // Object -> boxed Character.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Character b = o;"));
    exec("Object o = Character.valueOf((char)0); Character b = (Character)o;");
    exec("Object o = null; Character b = (Character)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Character b = (Character)o;"));
    // Object -> boxed Integer.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Integer b = o;"));
    exec("Object o = Integer.valueOf(0); Integer b = (Integer)o;");
    exec("Object o = null; Integer b = (Integer)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Integer b = (Integer)o;"));
    // Object -> boxed Long.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Long b = o;"));
    exec("Object o = Long.valueOf((long)0); Long b = (Long)o;");
    exec("Object o = null; Long b = (Long)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Long b = (Long)o;"));
    // Object -> boxed Float.
    // NOTE(review): Float.valueOf((long)0) deviates from the (float) cast used elsewhere
    // for Float fixtures — confirm intent.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((long)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Float b = o;"));
    exec("Object o = Float.valueOf((long)0); Float b = (Float)o;");
    exec("Object o = null; Float b = (Float)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((double)0); Float b = (Float)o;"));
    // Object -> boxed Double.
    // NOTE(review): Double.valueOf((long)0) deviates from the (double) cast used elsewhere
    // for Double fixtures — confirm intent.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Double.valueOf((long)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = null; Double b = o;"));
    exec("Object o = Double.valueOf((long)0); Double b = (Double)o;");
    exec("Object o = null; Double b = (Double)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Byte.valueOf((byte)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Short.valueOf((short)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Character.valueOf((char)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Integer.valueOf(0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Long.valueOf((long)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = Float.valueOf((float)0); Double b = (Double)o;"));
    // Object -> arbitrary reference type (ArrayList): same rule — explicit cast required.
    expectScriptThrows(ClassCastException.class, () -> exec("Object o = new ArrayList(); ArrayList b = o;"));
    exec("Object o = new ArrayList(); ArrayList b = (ArrayList)o;");
}
207428.691244elasticsearch
/**
 * Exhaustively exercises casts from a {@code Number}-typed variable to every
 * primitive, boxed, and reference target type in Painless scripts.
 *
 * Pattern for each target type: first the "matching" runtime value (implicit
 * assignment, then explicit cast, each also with a {@code null} source), then
 * every non-matching boxed runtime value (implicit and explicit). Assignments
 * from a static {@code Number} type are expected to throw
 * {@link ClassCastException} unless the target is a supertype ({@code Object})
 * or the explicit cast matches the actual runtime class (or the value is null
 * and the target is a reference type).
 */
public void testNumberCasts() {
    // Number -> Object: widening reference conversion, always allowed (even for null).
    exec("Number o = Integer.valueOf(0); Object n = o;");
    exec("Number o = null; Object n = o;");
    exec("Number o = Integer.valueOf(0); Object n = (Object)o;");
    exec("Number o = null; Object n = (Object)o;");
    // Number -> String: unrelated reference types, always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = 'string'; String n = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; String n = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = 'string'; String n = (String)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; String n = (String)o;"));
    // Number -> primitive boolean: never valid.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); boolean b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; boolean b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); boolean b = (boolean)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; boolean b = (boolean)o;"));
    // Number -> primitive byte: rejected for every runtime value, including Byte itself.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); byte b = (byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); byte b = (byte)o;"));
    // Number -> primitive short.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); short b = (short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); short b = (short)o;"));
    // Number -> primitive char (Character is not a Number, so even the "matching" case throws).
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); char b = (char)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); char b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); char b = (char)o;"));
    // Number -> primitive int.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf((int)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf((int)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); int b = (int)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); int b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); int b = (int)o;"));
    // Number -> primitive long.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); long b = (long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); long b = (long)o;"));
    // Number -> primitive float.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); float b = (float)o;"));
    // NOTE(review): Character.valueOf(0) takes a bare int here, unlike the (char)0 used in
    // every sibling block — presumably intentional in Painless, but worth confirming.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf(0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf(0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); float b = (float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); float b = (float)o;"));
    // Number -> primitive double.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); double b = (double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); double b = (double)o;"));
    // Number -> Boolean: unrelated boxed type, always rejected.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); Boolean b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Boolean b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Boolean.valueOf(true); Boolean b = (Boolean)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Boolean b = (Boolean)o;"));
    // Number -> Byte: explicit cast succeeds when the runtime class is Byte (or the value is null);
    // implicit assignment and any other runtime class throw.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Byte b = o;"));
    exec("Number o = Byte.valueOf((byte)0); Byte b = (Byte)o;");
    exec("Number o = null; Byte b = (Byte)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Byte b = (Byte)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Byte b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Byte b = (Byte)o;"));
    // Number -> Short: same shape as the Byte block above.
    // NOTE(review): these two use Short.valueOf((byte)0) — (byte) instead of the (short) used
    // elsewhere. byte widens implicitly to short, so the runtime class is still Short, but the
    // literal is inconsistent with the sibling blocks; looks like a copy-paste slip. Confirm.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((byte)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Short b = o;"));
    exec("Number o = Short.valueOf((byte)0); Short b = (Short)o;");
    exec("Number o = null; Short b = (Short)o;");
    // NOTE(review): Byte.valueOf((short)0) — (short) instead of (byte); siblings pass a matching
    // literal. Verify this is intentional and that the script still reaches the assignment cast.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((short)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((short)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Short b = (Short)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Short b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Short b = (Short)o;"));
    // Number -> Character: Character is not a Number subtype, so every case throws.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Character b = (Character)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Character b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Character b = (Character)o;"));
    // Number -> Integer: explicit cast succeeds for a runtime Integer or null only.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Integer b = o;"));
    exec("Number o = Integer.valueOf(0); Integer b = (Integer)o;");
    exec("Number o = null; Integer b = (Integer)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Integer b = (Integer)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Integer b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Integer b = (Integer)o;"));
    // Number -> Long.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Long b = o;"));
    exec("Number o = Long.valueOf((long)0); Long b = (Long)o;");
    exec("Number o = null; Long b = (Long)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Long b = (Long)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Long b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Long b = (Long)o;"));
    // Number -> Float.
    // NOTE(review): Float.valueOf((long)0) — (long) instead of (float); long widens to float so
    // the runtime class is still Float, but the literal differs from the sibling blocks.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((long)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Float b = o;"));
    exec("Number o = Float.valueOf((long)0); Float b = (Float)o;");
    exec("Number o = null; Float b = (Float)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Float b = (Float)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Float b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((double)0); Float b = (Float)o;"));
    // Number -> Double.
    // NOTE(review): Double.valueOf((long)0) — (long) instead of (double); long widens to double,
    // so the runtime class is still Double. Inconsistent literal, same pattern as the Float block.
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Double.valueOf((long)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = null; Double b = o;"));
    exec("Number o = Double.valueOf((long)0); Double b = (Double)o;");
    exec("Number o = null; Double b = (Double)o;");
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Byte.valueOf((byte)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Short.valueOf((short)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Character.valueOf((char)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Integer.valueOf(0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Long.valueOf((long)0); Double b = (Double)o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Double b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = Float.valueOf((float)0); Double b = (Double)o;"));
    // Number -> ArrayList: unrelated reference type — both implicit and explicit throw
    // (contrast with the Object-typed variant of this test, where the explicit cast succeeds
    // because the runtime value there actually is an ArrayList).
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = new ArrayList(); ArrayList b = o;"));
    expectScriptThrows(ClassCastException.class, () -> exec("Number o = new ArrayList(); ArrayList b = (ArrayList)o;"));
}
209869.211221elasticsearch
/**
 * Verifies {@code has_child} queries with {@code min_children}/{@code max_children} bounds
 * across all four score modes. The fixture built by {@code createMinMaxDocBuilders()}
 * (not visible here) presumably gives parent "2" one child, "3" two children and "4" three
 * children — that is what the expected hit counts and aggregate scores below reflect.
 *
 * <p>For each score mode the same matrix is exercised: min=1..4 with no max, min=1 with
 * max=4/3/2, min=max=2, and the invalid combination min=3/max=2 which must be rejected
 * at query-build time.
 */
public void testMinMaxChildren() throws Exception {
    assertAcked(prepareCreate("test").setMapping(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
    ensureGreen();
    indexRandom(true, createMinMaxDocBuilders().toArray(new IndexRequestBuilder[0]));

    // ScoreMode.None: every matching parent gets a constant score of 1.
    assertResponse(minMaxQuery(ScoreMode.None, 1, null), r -> assertIdsAndScores(r, new String[] { "2", "3", "4" }, new float[] { 1f, 1f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.None, 2, null), r -> assertIdsAndScores(r, new String[] { "3", "4" }, new float[] { 1f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.None, 3, null), r -> assertIdsAndScores(r, new String[] { "4" }, new float[] { 1f }));
    assertHitCount(minMaxQuery(ScoreMode.None, 4, null), 0L);
    assertResponse(minMaxQuery(ScoreMode.None, 1, 4), r -> assertIdsAndScores(r, new String[] { "2", "3", "4" }, new float[] { 1f, 1f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.None, 1, 3), r -> assertIdsAndScores(r, new String[] { "2", "3", "4" }, new float[] { 1f, 1f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.None, 1, 2), r -> assertIdsAndScores(r, new String[] { "2", "3" }, new float[] { 1f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.None, 2, 2), r -> assertIdsAndScores(r, new String[] { "3" }, new float[] { 1f }));
    assertMinGreaterThanMaxRejected(ScoreMode.None);

    // ScoreMode.Total: parent score is the sum of its child scores, so parents with more
    // children rank higher and the hit order reverses relative to ScoreMode.None.
    assertResponse(minMaxQuery(ScoreMode.Total, 1, null), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 6f, 3f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Total, 2, null), r -> assertIdsAndScores(r, new String[] { "4", "3" }, new float[] { 6f, 3f }));
    assertResponse(minMaxQuery(ScoreMode.Total, 3, null), r -> assertIdsAndScores(r, new String[] { "4" }, new float[] { 6f }));
    assertHitCount(minMaxQuery(ScoreMode.Total, 4, null), 0L);
    assertResponse(minMaxQuery(ScoreMode.Total, 1, 4), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 6f, 3f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Total, 1, 3), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 6f, 3f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Total, 1, 2), r -> assertIdsAndScores(r, new String[] { "3", "2" }, new float[] { 3f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Total, 2, 2), r -> assertIdsAndScores(r, new String[] { "3" }, new float[] { 3f }));
    assertMinGreaterThanMaxRejected(ScoreMode.Total);

    // ScoreMode.Max: parent score is the highest single child score.
    assertResponse(minMaxQuery(ScoreMode.Max, 1, null), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 3f, 2f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Max, 2, null), r -> assertIdsAndScores(r, new String[] { "4", "3" }, new float[] { 3f, 2f }));
    assertResponse(minMaxQuery(ScoreMode.Max, 3, null), r -> assertIdsAndScores(r, new String[] { "4" }, new float[] { 3f }));
    assertHitCount(minMaxQuery(ScoreMode.Max, 4, null), 0L);
    assertResponse(minMaxQuery(ScoreMode.Max, 1, 4), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 3f, 2f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Max, 1, 3), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 3f, 2f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Max, 1, 2), r -> assertIdsAndScores(r, new String[] { "3", "2" }, new float[] { 2f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Max, 2, 2), r -> assertIdsAndScores(r, new String[] { "3" }, new float[] { 2f }));
    assertMinGreaterThanMaxRejected(ScoreMode.Max);

    // ScoreMode.Avg: parent score is the arithmetic mean of its child scores.
    assertResponse(minMaxQuery(ScoreMode.Avg, 1, null), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 2f, 1.5f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Avg, 2, null), r -> assertIdsAndScores(r, new String[] { "4", "3" }, new float[] { 2f, 1.5f }));
    assertResponse(minMaxQuery(ScoreMode.Avg, 3, null), r -> assertIdsAndScores(r, new String[] { "4" }, new float[] { 2f }));
    assertHitCount(minMaxQuery(ScoreMode.Avg, 4, null), 0L);
    assertResponse(minMaxQuery(ScoreMode.Avg, 1, 4), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 2f, 1.5f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Avg, 1, 3), r -> assertIdsAndScores(r, new String[] { "4", "3", "2" }, new float[] { 2f, 1.5f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Avg, 1, 2), r -> assertIdsAndScores(r, new String[] { "3", "2" }, new float[] { 1.5f, 1f }));
    assertResponse(minMaxQuery(ScoreMode.Avg, 2, 2), r -> assertIdsAndScores(r, new String[] { "3" }, new float[] { 1.5f }));
    assertMinGreaterThanMaxRejected(ScoreMode.Avg);
}

/**
 * Asserts that {@code response} holds exactly {@code expectedIds.length} total hits and
 * that the returned hits carry the given ids and scores, in order.
 */
private static void assertIdsAndScores(SearchResponse response, String[] expectedIds, float[] expectedScores) {
    assertThat(response.getHits().getTotalHits().value, equalTo((long) expectedIds.length));
    for (int i = 0; i < expectedIds.length; i++) {
        assertThat(response.getHits().getHits()[i].getId(), equalTo(expectedIds[i]));
        assertThat(response.getHits().getHits()[i].getScore(), equalTo(expectedScores[i]));
    }
}

/** Building a has_child query with max_children &lt; min_children must fail immediately. */
private void assertMinGreaterThanMaxRejected(ScoreMode scoreMode) {
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> minMaxQuery(scoreMode, 3, 2));
    assertThat(e.getMessage(), equalTo("[has_child] 'max_children' is less than 'min_children'"));
}
204886.843286elasticsearch
/**
 * Exercises {@code multi_match} queries in {@code CROSS_FIELDS} mode against the "test"
 * index: OR vs AND operators, explicit analyzers, leniency around numeric fields,
 * per-field boosts, and wildcard field patterns. {@code randomizeType} rewrites the
 * query into a randomly chosen equivalent representation, so every assertion here must
 * hold regardless of that choice. Fixture documents ("theone", "theother",
 * "ultimate1"/"ultimate2", "nowHero") are indexed elsewhere — not visible in this block.
 */
public void testCrossFieldMode() throws ExecutionException, InterruptedException {
    // OR across the three name fields: the best combined match must come first.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).operator(Operator.OR))), response -> assertFirstHit(response, hasId("theone")));
    // Adding "category" plus the "marvel hero" terms flips the ranking; the top two
    // scores must be strictly ordered, not merely the ids.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).operator(Operator.OR))), response -> {
        assertFirstHit(response, hasId("theother"));
        assertSecondHit(response, hasId("theone"));
        assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore()));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).operator(Operator.OR))), response -> assertFirstHit(response, hasId("theother")));
    // AND: every term must be found in at least one of the listed fields.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).operator(Operator.AND))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    // lenient(true) keeps the query alive when a term cannot be parsed by every field
    // (e.g. text tokens against the numeric-looking "skill"/"int-field" — TODO confirm
    // their mappings against the fixture setup).
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category").lenient(true).operator(Operator.AND))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    // Same query with "int-field" added, and again with the field order shuffled:
    // results must not depend on the order fields are listed.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill", "int-field").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category").lenient(true).operator(Operator.AND))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america 15", "skill", "full_name", "first_name", "last_name", "category", "int-field").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category").lenient(true).operator(Operator.AND))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america 15", "first_name", "last_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true).analyzer("category"))), response -> assertFirstHit(response, hasId("theone")));
    // Numeric and mixed field sets with the "category" analyzer: "15" matches via
    // "skill", so "theone" wins regardless of which extra fields are included or in
    // what order they appear.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category"))), response -> assertFirstHit(response, hasId("theone")));
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("25 15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category"))), response -> assertFirstHit(response, hasId("theone")));
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("25 15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category"))), response -> assertFirstHit(response, hasId("theone")));
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("25 15", "first_name", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category"))), response -> assertFirstHit(response, hasId("theone")));
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("25 15", "int-field", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category"))), response -> assertFirstHit(response, hasId("theone")));
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("25 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category"))), response -> assertFirstHit(response, hasId("theone")));
    // Explicit "category" analyzer: OR still ranks "theone" first; AND narrows to one hit.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category").operator(Operator.OR))), response -> assertFirstHit(response, hasId("theone")));
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).analyzer("category").operator(Operator.AND))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    // Without the analyzer override, AND across all four terms matches nothing; run
    // twice, presumably to cover both branches of the randomBoolean() type choice.
    assertHitCount(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type(randomBoolean() ? MultiMatchQueryBuilder.Type.CROSS_FIELDS : MultiMatchQueryBuilder.DEFAULT_TYPE).operator(Operator.AND))), 0L);
    assertHitCount(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category").type(randomBoolean() ? MultiMatchQueryBuilder.Type.CROSS_FIELDS : MultiMatchQueryBuilder.DEFAULT_TYPE).operator(Operator.AND))), 0L);
    // Boosting last_name (x10) ranks ultimate1 first; the unboosted query below must
    // produce the opposite order — both with strictly ordered scores.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "category").field("last_name", 10).type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).operator(Operator.AND))), response -> {
        assertHitCount(response, 2L);
        assertFirstHit(response, hasId("ultimate1"));
        assertSecondHit(response, hasId("ultimate2"));
        assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore()));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).operator(Operator.AND))), response -> {
        assertHitCount(response, 2L);
        assertFirstHit(response, hasId("ultimate2"));
        assertSecondHit(response, hasId("ultimate1"));
        assertThat(response.getHits().getHits()[0].getScore(), greaterThan(response.getHits().getHits()[1].getScore()));
    });
    // Pure numeric term with no analyzer override, over growing field sets.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "skill", "first_name").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "int-field", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("theone"));
    });
    // "alpha" cannot be parsed by the numeric fields; lenient(true) keeps the query
    // valid and "ultimate1" matches on first_name.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("alpha 15", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("ultimate1"));
    });
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("alpha 15", "int-field", "first_name", "skill").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).lenient(true))), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("ultimate1"));
    });
    // Wildcard field pattern "f*" plus an explicit "date" field; "now" resolves via
    // date math. Note lenient(true) is applied after randomizeType here, on the
    // returned builder, unlike the calls above.
    assertResponse(prepareSearch("test").setQuery(randomizeType(multiMatchQuery("now", "f*", "date").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).lenient(true)), response -> {
        assertHitCount(response, 1L);
        assertFirstHit(response, hasId("nowHero"));
    });
}
206266.781258elasticsearch
public void testFormatsValidParsing() {
    assertParses("1522332219", "epoch_second");
    assertParses("0", "epoch_second");
    assertParses("1", "epoch_second");
    assertParses("1522332219321", "epoch_millis");
    assertParses("0", "epoch_millis");
    assertParses("1", "epoch_millis");
    assertParses("20181126", "basic_date");
    assertParses("20181126T121212.123Z", "basic_date_time");
    assertParses("20181126T121212.123+10:00", "basic_date_time");
    assertParses("20181126T121212.123-0800", "basic_date_time");
    assertParses("20181126T121212Z", "basic_date_time_no_millis");
    assertParses("20181126T121212+01:00", "basic_date_time_no_millis");
    assertParses("20181126T121212+0100", "basic_date_time_no_millis");
    assertParses("2018363", "basic_ordinal_date");
    assertParses("2018363T121212.1Z", "basic_ordinal_date_time");
    assertParses("2018363T121212.123Z", "basic_ordinal_date_time");
    assertParses("2018363T121212.123456789Z", "basic_ordinal_date_time");
    assertParses("2018363T121212.123+0100", "basic_ordinal_date_time");
    assertParses("2018363T121212.123+01:00", "basic_ordinal_date_time");
    assertParses("2018363T121212Z", "basic_ordinal_date_time_no_millis");
    assertParses("2018363T121212+0100", "basic_ordinal_date_time_no_millis");
    assertParses("2018363T121212+01:00", "basic_ordinal_date_time_no_millis");
    assertParses("121212.1Z", "basic_time");
    assertParses("121212.123Z", "basic_time");
    assertParses("121212.123456789Z", "basic_time");
    assertParses("121212.1+0100", "basic_time");
    assertParses("121212.123+0100", "basic_time");
    assertParses("121212.123+01:00", "basic_time");
    assertParses("121212Z", "basic_time_no_millis");
    assertParses("121212+0100", "basic_time_no_millis");
    assertParses("121212+01:00", "basic_time_no_millis");
    assertParses("T121212.1Z", "basic_t_time");
    assertParses("T121212.123Z", "basic_t_time");
    assertParses("T121212.123456789Z", "basic_t_time");
    assertParses("T121212.1+0100", "basic_t_time");
    assertParses("T121212.123+0100", "basic_t_time");
    assertParses("T121212.123+01:00", "basic_t_time");
    assertParses("T121212Z", "basic_t_time_no_millis");
    assertParses("T121212+0100", "basic_t_time_no_millis");
    assertParses("T121212+01:00", "basic_t_time_no_millis");
    assertParses("2018W313", "basic_week_date");
    assertParses("1W313", "basic_week_date");
    assertParses("18W313", "basic_week_date");
    assertParses("2018W313T121212.1Z", "basic_week_date_time");
    assertParses("2018W313T121212.123Z", "basic_week_date_time");
    assertParses("2018W313T121212.123456789Z", "basic_week_date_time");
    assertParses("2018W313T121212.123+0100", "basic_week_date_time");
    assertParses("2018W313T121212.123+01:00", "basic_week_date_time");
    assertParses("2018W313T121212Z", "basic_week_date_time_no_millis");
    assertParses("2018W313T121212+0100", "basic_week_date_time_no_millis");
    assertParses("2018W313T121212+01:00", "basic_week_date_time_no_millis");
    assertParses("2018-12-31", "date");
    assertParses("18-5-6", "date");
    assertParses("10000-5-6", "date");
    assertParses("2018-12-31T12", "date_hour");
    assertParses("2018-12-31T8", "date_hour");
    assertParses("2018-12-31T12:12", "date_hour_minute");
    assertParses("2018-12-31T8:3", "date_hour_minute");
    assertParses("2018-12-31T12:12:12", "date_hour_minute_second");
    assertParses("2018-12-31T12:12:1", "date_hour_minute_second");
    assertParses("2018-12-31T12:12:12.1", "date_hour_minute_second_fraction");
    assertParses("2018-12-31T12:12:12.123", "date_hour_minute_second_fraction");
    assertParses("2018-12-31T12:12:12.123456789", "date_hour_minute_second_fraction");
    assertParses("2018-12-31T12:12:12.1", "date_hour_minute_second_millis");
    assertParses("2018-12-31T12:12:12.123", "date_hour_minute_second_millis");
    assertParseException("2018-12-31T12:12:12.123456789", "date_hour_minute_second_millis");
    assertParses("2018-12-31T12:12:12.1", "date_hour_minute_second_millis");
    assertParses("2018-12-31T12:12:12.1", "date_hour_minute_second_fraction");
    assertParses("2018-05", "date_optional_time");
    assertParses("2018-05-30", "date_optional_time");
    assertParses("2018-05-30T20", "date_optional_time");
    assertParses("2018-05-30T20:21", "date_optional_time");
    assertParses("2018-05-30T20:21:23", "date_optional_time");
    assertParses("2018-05-30T20:21:23.1", "date_optional_time");
    assertParses("2018-05-30T20:21:23.123", "date_optional_time");
    assertParses("2018-05-30T20:21:23.123456789", "date_optional_time");
    assertParses("2018-05-30T20:21:23.123Z", "date_optional_time");
    assertParses("2018-05-30T20:21:23.123456789Z", "date_optional_time");
    assertParses("2018-05-30T20:21:23.1+0100", "date_optional_time");
    assertParses("2018-05-30T20:21:23.123+0100", "date_optional_time");
    assertParses("2018-05-30T20:21:23.1+01:00", "date_optional_time");
    assertParses("2018-05-30T20:21:23.123+01:00", "date_optional_time");
    assertParses("2018-12-1", "date_optional_time");
    assertParses("2018-12-31T10:15:30", "date_optional_time");
    assertParses("2018-12-31T10:15:3", "date_optional_time");
    assertParses("2018-12-31T10:5:30", "date_optional_time");
    assertParses("2018-12-31T1:15:30", "date_optional_time");
    assertParses("2018-12-31T10:15:30.1Z", "date_time");
    assertParses("2018-12-31T10:15:30.123Z", "date_time");
    assertParses("2018-12-31T10:15:30.123456789Z", "date_time");
    assertParses("2018-12-31T10:15:30.1+0100", "date_time");
    assertParses("2018-12-31T10:15:30.123+0100", "date_time");
    assertParses("2018-12-31T10:15:30.123+01:00", "date_time");
    assertParses("2018-12-31T10:15:30.1+01:00", "date_time");
    assertParses("2018-12-31T10:15:30.11Z", "date_time");
    assertParses("2018-12-31T10:15:30.11+0100", "date_time");
    assertParses("2018-12-31T10:15:30.11+01:00", "date_time");
    assertParses("2018-12-31T10:15:3.1Z", "date_time");
    assertParses("2018-12-31T10:15:3.123Z", "date_time");
    assertParses("2018-12-31T10:15:3.123456789Z", "date_time");
    assertParses("2018-12-31T10:15:3.1+0100", "date_time");
    assertParses("2018-12-31T10:15:3.123+0100", "date_time");
    assertParses("2018-12-31T10:15:3.123+01:00", "date_time");
    assertParses("2018-12-31T10:15:3.1+01:00", "date_time");
    assertParses("2018-12-31T10:15:30Z", "date_time_no_millis");
    assertParses("2018-12-31T10:15:30+0100", "date_time_no_millis");
    assertParses("2018-12-31T10:15:30+01:00", "date_time_no_millis");
    assertParses("2018-12-31T10:5:30Z", "date_time_no_millis");
    assertParses("2018-12-31T10:5:30+0100", "date_time_no_millis");
    assertParses("2018-12-31T10:5:30+01:00", "date_time_no_millis");
    assertParses("2018-12-31T10:15:3Z", "date_time_no_millis");
    assertParses("2018-12-31T10:15:3+0100", "date_time_no_millis");
    assertParses("2018-12-31T10:15:3+01:00", "date_time_no_millis");
    assertParses("2018-12-31T1:15:30Z", "date_time_no_millis");
    assertParses("2018-12-31T1:15:30+0100", "date_time_no_millis");
    assertParses("2018-12-31T1:15:30+01:00", "date_time_no_millis");
    assertParses("12", "hour");
    assertParses("01", "hour");
    assertParses("1", "hour");
    assertParses("12:12", "hour_minute");
    assertParses("12:01", "hour_minute");
    assertParses("12:1", "hour_minute");
    assertParses("12:12:12", "hour_minute_second");
    assertParses("12:12:01", "hour_minute_second");
    assertParses("12:12:1", "hour_minute_second");
    assertParses("12:12:12.123", "hour_minute_second_fraction");
    assertParses("12:12:12.123456789", "hour_minute_second_fraction");
    assertParses("12:12:12.1", "hour_minute_second_fraction");
    assertParseException("12:12:12", "hour_minute_second_fraction");
    assertParses("12:12:12.123", "hour_minute_second_millis");
    assertParseException("12:12:12.123456789", "hour_minute_second_millis");
    assertParses("12:12:12.1", "hour_minute_second_millis");
    assertParseException("12:12:12", "hour_minute_second_millis");
    assertParses("2018-128", "ordinal_date");
    assertParses("2018-1", "ordinal_date");
    assertParses("2018-128T10:15:30.1Z", "ordinal_date_time");
    assertParses("2018-128T10:15:30.123Z", "ordinal_date_time");
    assertParses("2018-128T10:15:30.123456789Z", "ordinal_date_time");
    assertParses("2018-128T10:15:30.123+0100", "ordinal_date_time");
    assertParses("2018-128T10:15:30.123+01:00", "ordinal_date_time");
    assertParses("2018-1T10:15:30.1Z", "ordinal_date_time");
    assertParses("2018-1T10:15:30.123Z", "ordinal_date_time");
    assertParses("2018-1T10:15:30.123456789Z", "ordinal_date_time");
    assertParses("2018-1T10:15:30.123+0100", "ordinal_date_time");
    assertParses("2018-1T10:15:30.123+01:00", "ordinal_date_time");
    assertParses("2018-128T10:15:30Z", "ordinal_date_time_no_millis");
    assertParses("2018-128T10:15:30+0100", "ordinal_date_time_no_millis");
    assertParses("2018-128T10:15:30+01:00", "ordinal_date_time_no_millis");
    assertParses("2018-1T10:15:30Z", "ordinal_date_time_no_millis");
    assertParses("2018-1T10:15:30+0100", "ordinal_date_time_no_millis");
    assertParses("2018-1T10:15:30+01:00", "ordinal_date_time_no_millis");
    assertParses("10:15:30.1Z", "time");
    assertParses("10:15:30.123Z", "time");
    assertParses("10:15:30.123456789Z", "time");
    assertParses("10:15:30.123+0100", "time");
    assertParses("10:15:30.123+01:00", "time");
    assertParses("1:15:30.1Z", "time");
    assertParses("1:15:30.123Z", "time");
    assertParses("1:15:30.123+0100", "time");
    assertParses("1:15:30.123+01:00", "time");
    assertParses("10:1:30.1Z", "time");
    assertParses("10:1:30.123Z", "time");
    assertParses("10:1:30.123+0100", "time");
    assertParses("10:1:30.123+01:00", "time");
    assertParses("10:15:3.1Z", "time");
    assertParses("10:15:3.123Z", "time");
    assertParses("10:15:3.123+0100", "time");
    assertParses("10:15:3.123+01:00", "time");
    assertParseException("10:15:3.1", "time");
    assertParseException("10:15:3Z", "time");
    assertParses("10:15:30Z", "time_no_millis");
    assertParses("10:15:30+0100", "time_no_millis");
    assertParses("10:15:30+01:00", "time_no_millis");
    assertParses("01:15:30Z", "time_no_millis");
    assertParses("01:15:30+0100", "time_no_millis");
    assertParses("01:15:30+01:00", "time_no_millis");
    assertParses("1:15:30Z", "time_no_millis");
    assertParses("1:15:30+0100", "time_no_millis");
    assertParses("1:15:30+01:00", "time_no_millis");
    assertParses("10:5:30Z", "time_no_millis");
    assertParses("10:5:30+0100", "time_no_millis");
    assertParses("10:5:30+01:00", "time_no_millis");
    assertParses("10:15:3Z", "time_no_millis");
    assertParses("10:15:3+0100", "time_no_millis");
    assertParses("10:15:3+01:00", "time_no_millis");
    assertParseException("10:15:3", "time_no_millis");
    assertParses("T10:15:30.1Z", "t_time");
    assertParses("T10:15:30.123Z", "t_time");
    assertParses("T10:15:30.123456789Z", "t_time");
    assertParses("T10:15:30.1+0100", "t_time");
    assertParses("T10:15:30.123+0100", "t_time");
    assertParses("T10:15:30.123+01:00", "t_time");
    assertParses("T10:15:30.1+01:00", "t_time");
    assertParses("T1:15:30.123Z", "t_time");
    assertParses("T1:15:30.123+0100", "t_time");
    assertParses("T1:15:30.123+01:00", "t_time");
    assertParses("T10:1:30.123Z", "t_time");
    assertParses("T10:1:30.123+0100", "t_time");
    assertParses("T10:1:30.123+01:00", "t_time");
    assertParses("T10:15:3.123Z", "t_time");
    assertParses("T10:15:3.123+0100", "t_time");
    assertParses("T10:15:3.123+01:00", "t_time");
    assertParseException("T10:15:3.1", "t_time");
    assertParseException("T10:15:3Z", "t_time");
    assertParses("T10:15:30Z", "t_time_no_millis");
    assertParses("T10:15:30+0100", "t_time_no_millis");
    assertParses("T10:15:30+01:00", "t_time_no_millis");
    assertParses("T1:15:30Z", "t_time_no_millis");
    assertParses("T1:15:30+0100", "t_time_no_millis");
    assertParses("T1:15:30+01:00", "t_time_no_millis");
    assertParses("T10:1:30Z", "t_time_no_millis");
    assertParses("T10:1:30+0100", "t_time_no_millis");
    assertParses("T10:1:30+01:00", "t_time_no_millis");
    assertParses("T10:15:3Z", "t_time_no_millis");
    assertParses("T10:15:3+0100", "t_time_no_millis");
    assertParses("T10:15:3+01:00", "t_time_no_millis");
    assertParseException("T10:15:3", "t_time_no_millis");
    assertParses("2012-W48-6", "week_date");
    assertParses("2012-W01-6", "week_date");
    assertParses("2012-W1-6", "week_date");
    assertParseException("2012-W1-8", "week_date");
    assertParses("2012-W48-6T10:15:30.1Z", "week_date_time");
    assertParses("2012-W48-6T10:15:30.123Z", "week_date_time");
    assertParses("2012-W48-6T10:15:30.123456789Z", "week_date_time");
    assertParses("2012-W48-6T10:15:30.1+0100", "week_date_time");
    assertParses("2012-W48-6T10:15:30.123+0100", "week_date_time");
    assertParses("2012-W48-6T10:15:30.1+01:00", "week_date_time");
    assertParses("2012-W48-6T10:15:30.123+01:00", "week_date_time");
    assertParses("2012-W1-6T10:15:30.1Z", "week_date_time");
    assertParses("2012-W1-6T10:15:30.123Z", "week_date_time");
    assertParses("2012-W1-6T10:15:30.1+0100", "week_date_time");
    assertParses("2012-W1-6T10:15:30.123+0100", "week_date_time");
    assertParses("2012-W1-6T10:15:30.1+01:00", "week_date_time");
    assertParses("2012-W1-6T10:15:30.123+01:00", "week_date_time");
    assertParses("2012-W48-6T10:15:30Z", "week_date_time_no_millis");
    assertParses("2012-W48-6T10:15:30+0100", "week_date_time_no_millis");
    assertParses("2012-W48-6T10:15:30+01:00", "week_date_time_no_millis");
    assertParses("2012-W1-6T10:15:30Z", "week_date_time_no_millis");
    assertParses("2012-W1-6T10:15:30+0100", "week_date_time_no_millis");
    assertParses("2012-W1-6T10:15:30+01:00", "week_date_time_no_millis");
    assertParses("2012", "year");
    assertParses("1", "year");
    assertParses("-2000", "year");
    assertParses("2012-12", "year_month");
    assertParses("1-1", "year_month");
    assertParses("2012-12-31", "year_month_day");
    assertParses("1-12-31", "year_month_day");
    assertParses("2012-1-31", "year_month_day");
    assertParses("2012-12-1", "year_month_day");
    assertParses("2018", "weekyear");
    assertParses("1", "weekyear");
    assertParses("2017", "weekyear");
    assertParses("2018-W29", "weekyear_week");
    assertParses("2018-W1", "weekyear_week");
    assertParses("2012-W31-5", "weekyear_week_day");
    assertParses("2012-W1-1", "weekyear_week_day");
}
209849.061223elasticsearch
public void testNestedSorting() throws Exception {
    // Index seven nested blocks in Lucene block-join order (children first, parent
    // last), plus one childless parent and three unrelated root documents. Commits
    // are interspersed so the index ends up spanning multiple segments.
    // Resulting doc IDs: parents land at 3, 7, 11, 15, 19, 23 (then childless 24),
    // and 28 — the assertions below depend on this exact order.
    writer.addDocuments(nestedDocs("a", new String[] { "a", "b", "c" }, new String[] { "T", "T", "T" }));
    writer.commit();
    writer.addDocuments(nestedDocs("b", new String[] { "c", "d", "e" }, new String[] { "T", "T", "T" }));
    writer.addDocuments(nestedDocs("c", new String[] { "e", "f", "g" }, new String[] { "T", "T", "T" }));
    writer.addDocuments(nestedDocs("d", new String[] { "g", "h", "i" }, new String[] { "T", "F", "F" }));
    writer.commit();
    writer.addDocuments(nestedDocs("f", new String[] { "i", "j", "k" }, new String[] { "F", "F", "F" }));
    writer.addDocuments(nestedDocs("g", new String[] { "k", "l", "m" }, new String[] { "T", "T", "T" }));
    // A parent with no children at all (doc 24) — must not break nested sorting.
    Document childlessParent = new Document();
    childlessParent.add(new StringField("_nested_path", "parent", Field.Store.NO));
    childlessParent.add(new StringField("field1", "h", Field.Store.NO));
    writer.addDocument(childlessParent);
    writer.addDocuments(nestedDocs("i", new String[] { "m", "n", "o" }, new String[] { "T", "F", "F" }));
    writer.commit();
    // Three root-level documents that are neither parents nor children (docs 29-31).
    for (int i = 0; i < 3; i++) {
        Document rootDoc = new Document();
        rootDoc.add(new StringField("fieldXXX", "x", Field.Store.NO));
        writer.addDocument(rootDoc);
    }
    MultiValueMode sortMode = MultiValueMode.MIN;
    DirectoryReader reader = DirectoryReader.open(writer);
    reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0));
    IndexSearcher searcher = newSearcher(reader, false);
    PagedBytesIndexFieldData indexFieldData = getForField("field2");
    Query parentFilter = new TermQuery(new Term("_nested_path", "parent"));
    Query childFilter = Queries.not(parentFilter);
    BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
    ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
    // Ascending sort: each parent is keyed by the MIN field2 value of its children.
    Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
    TopFieldDocs topDocs = searcher.search(query, 5, sort);
    assertThat(topDocs.totalHits.value, equalTo(7L));
    assertThat(topDocs.scoreDocs.length, equalTo(5));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("a"));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("c"));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("e"));
    assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
    assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("i"));
    // Descending sort keyed by the MAX child value.
    sortMode = MultiValueMode.MAX;
    nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
    sort = new Sort(new SortField("field2", nestedComparatorSource, true));
    topDocs = searcher.search(query, 5, sort);
    assertThat(topDocs.totalHits.value, equalTo(7L));
    assertThat(topDocs.scoreDocs.length, equalTo(5));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(28));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("o"));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(23));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(19));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("k"));
    assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("i"));
    assertThat(topDocs.scoreDocs[4].doc, equalTo(11));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("g"));
    // Restrict the join to children with filter_1 == "T"; MAX sort, descending.
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(parentFilter, Occur.MUST_NOT);
    bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST);
    childFilter = bq.build();
    nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
    query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
    sort = new Sort(new SortField("field2", nestedComparatorSource, true));
    topDocs = searcher.search(query, 5, sort);
    assertThat(topDocs.totalHits.value, equalTo(6L));
    assertThat(topDocs.scoreDocs.length, equalTo(5));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(23));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("m"));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(28));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("g"));
    assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
    assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
    assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("e"));
    searcher.getIndexReader().close();
}

/**
 * Builds one nested document block: one child document per (field2, filter_1) pair,
 * in order, followed by the parent document ({@code _nested_path=parent},
 * {@code field1=parentValue}). The parent-last ordering is required by Lucene's
 * block-join indexing contract.
 */
private static List<Document> nestedDocs(String parentValue, String[] field2Values, String[] filterValues) {
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < field2Values.length; i++) {
        Document child = new Document();
        child.add(new StringField("field2", field2Values[i], Field.Store.NO));
        child.add(new StringField("filter_1", filterValues[i], Field.Store.NO));
        docs.add(child);
    }
    Document parent = new Document();
    parent.add(new StringField("_nested_path", "parent", Field.Store.NO));
    parent.add(new StringField("field1", parentValue, Field.Store.NO));
    docs.add(parent);
    return docs;
}
205798.71281elasticsearch
public void testAutoscalingCapacity() {
    final long BYTES_IN_64GB = ByteSizeValue.ofGb(64).getBytes();
    final long AUTO_ML_MEMORY_FOR_64GB_NODE = NativeMemoryCalculator.allowedBytesForMl(BYTES_IN_64GB, randomIntBetween(5, 90), true);
    // Capacity that also carries an explicit JVM size (50 MB) — used by the first two checks.
    NativeMemoryCapacity capacity = new NativeMemoryCapacity(
        ByteSizeValue.ofGb(4).getBytes() - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(),
        ByteSizeValue.ofGb(1).getBytes() - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(),
        ByteSizeValue.ofMb(50).getBytes());
    {
        // Non-auto mode with a fixed 25% JVM heap percentile: sizes scale by a plain 4x factor.
        MlMemoryAutoscalingCapacity autoscalingCapacity = capacity.autoscalingCapacity(25, false, NativeMemoryCalculator.allowedBytesForMl(BYTES_IN_64GB, 25, false), 1).build();
        assertThat(autoscalingCapacity.nodeSize().getBytes(), equalTo(ByteSizeValue.ofGb(1).getBytes() * 4L));
        assertThat(autoscalingCapacity.tierSize().getBytes(), equalTo(ByteSizeValue.ofGb(4).getBytes() * 4L));
    }
    // Same capacity (explicit JVM size) in auto mode.
    assertAutoCapacity(capacity, AUTO_ML_MEMORY_FOR_64GB_NODE, 1, 1335885824L, 4557111296L);
    // 4 GB tier / 1 GB node, 1-3 availability zones (one code overhead per zone).
    assertAutoCapacity(capacityGb(4, 1, 1), AUTO_ML_MEMORY_FOR_64GB_NODE, 1, 2134900736L, 7503609856L);
    assertAutoCapacity(capacityGb(4, 2, 1), AUTO_ML_MEMORY_FOR_64GB_NODE, 2, 2134900736L, 7851737088L);
    assertAutoCapacity(capacityGb(4, 3, 1), AUTO_ML_MEMORY_FOR_64GB_NODE, 3, 2134900736L, 8195670018L);
    // 4 GB tier / 3 GB node.
    assertAutoCapacity(capacityGb(4, 1, 3), AUTO_ML_MEMORY_FOR_64GB_NODE, 1, 5712642048L, 7503609856L);
    assertAutoCapacity(capacityGb(4, 2, 3), AUTO_ML_MEMORY_FOR_64GB_NODE, 2, 5712642048L, 7851737088L);
    assertAutoCapacity(capacityGb(4, 3, 3), AUTO_ML_MEMORY_FOR_64GB_NODE, 3, 5712642048L, 8195670018L);
    // 30 GB tier / 5 GB node.
    assertAutoCapacity(capacityGb(30, 1, 5), AUTO_ML_MEMORY_FOR_64GB_NODE, 1, 9294577664L, 41750102016L);
    assertAutoCapacity(capacityGb(30, 2, 5), AUTO_ML_MEMORY_FOR_64GB_NODE, 2, 9294577664L, 47706013696L);
    assertAutoCapacity(capacityGb(30, 3, 5), AUTO_ML_MEMORY_FOR_64GB_NODE, 3, 9294577664L, 53666119680L);
    // 30 GB tier / 20 GB node: node size grows, tier sizes match the 5 GB-node cases.
    assertAutoCapacity(capacityGb(30, 1, 20), AUTO_ML_MEMORY_FOR_64GB_NODE, 1, 29817307136L, 41750102016L);
    assertAutoCapacity(capacityGb(30, 2, 20), AUTO_ML_MEMORY_FOR_64GB_NODE, 2, 29817307136L, 47706013696L);
    assertAutoCapacity(capacityGb(30, 3, 20), AUTO_ML_MEMORY_FOR_64GB_NODE, 3, 29817307136L, 53666119680L);
    // 100 GB tier / 5 GB node (tier larger than a single 64 GB node).
    assertAutoCapacity(capacityGb(100, 2, 5), AUTO_ML_MEMORY_FOR_64GB_NODE, 1, 9294577664L, 131222994944L);
    assertAutoCapacity(capacityGb(100, 2, 5), AUTO_ML_MEMORY_FOR_64GB_NODE, 2, 9294577664L, 131222994944L);
    assertAutoCapacity(capacityGb(100, 3, 5), AUTO_ML_MEMORY_FOR_64GB_NODE, 3, 9294577664L, 137170518018L);
    // 155 GB tier / 50 GB node.
    assertAutoCapacity(capacityGb(155, 3, 50), AUTO_ML_MEMORY_FOR_64GB_NODE, 1, 65611497472L, 202794598401L);
    assertAutoCapacity(capacityGb(155, 4, 50), AUTO_ML_MEMORY_FOR_64GB_NODE, 2, 65611497472L, 208758898688L);
    assertAutoCapacity(capacityGb(155, 3, 50), AUTO_ML_MEMORY_FOR_64GB_NODE, 3, 65611497472L, 202794598401L);
}

/**
 * Shorthand for a {@link NativeMemoryCapacity} of {@code tierGb} GB minus
 * {@code tierOverheads} native-code overheads at the tier level, and {@code nodeGb} GB
 * minus one native-code overhead at the node level.
 */
private NativeMemoryCapacity capacityGb(long tierGb, int tierOverheads, long nodeGb) {
    return new NativeMemoryCapacity(
        ByteSizeValue.ofGb(tierGb).getBytes() - tierOverheads * NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(),
        ByteSizeValue.ofGb(nodeGb).getBytes() - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes());
}

/**
 * Builds the autoscaling capacity for {@code capacity} in auto mode (random JVM-size
 * percentile, {@code useAuto=true}) with the given number of ML availability zones, and
 * asserts the exact resulting node and tier sizes in bytes.
 */
private void assertAutoCapacity(NativeMemoryCapacity capacity, long mlMemoryFor64GbNode, int numAvailabilityZones, long expectedNodeBytes, long expectedTierBytes) {
    MlMemoryAutoscalingCapacity autoscalingCapacity = capacity.autoscalingCapacity(randomIntBetween(5, 90), true, mlMemoryFor64GbNode, numAvailabilityZones).build();
    assertThat(autoscalingCapacity.nodeSize().getBytes(), equalTo(expectedNodeBytes));
    assertThat(autoscalingCapacity.tierSize().getBytes(), equalTo(expectedTierBytes));
}
203773.040171elasticsearch
/**
 * Reads and decodes a Lucene 6.2-format segment info file ({@code .si}) for the given
 * segment, validating the codec header, the expected {@code segmentID}, and the
 * checksum footer.
 *
 * <p>The wire format read here, in order: index header, 3 ints of version, doc count,
 * compound-file flag byte, diagnostics map, file set, attributes map, and an optional
 * index-sort description (count-prefixed list of sort fields). Any decoding failure is
 * captured and rethrown via {@code CodecUtil.checkFooter} so that checksum information
 * is included in the error.
 *
 * @param dir       directory containing the segment files
 * @param segment   segment name used to derive the {@code .si} file name
 * @param segmentID expected segment identifier; mismatch fails the header check
 * @param context   IO context for opening the input
 * @return the decoded {@link SegmentInfo}, with its file set populated
 * @throws IOException if the file is unreadable, corrupt, or fails checksum/header checks
 */
public SegmentInfo read(Directory dir, String segment, byte[] segmentID, IOContext context) throws IOException {
    final String fileName = IndexFileNames.segmentFileName(segment, "", Lucene62SegmentInfoFormat.SI_EXTENSION);
    // Byte order is reversed relative to current Lucene; the EndiannessReverser wrapper
    // presents the legacy (big-endian era) layout transparently.
    try (ChecksumIndexInput input = EndiannessReverserUtil.openChecksumInput(dir, fileName, context)) {
        Throwable priorE = null;
        SegmentInfo si = null;
        try {
            // NOTE(review): 'format' is read for header validation but its value is unused afterwards.
            int format = CodecUtil.checkIndexHeader(input, Lucene62SegmentInfoFormat.CODEC_NAME, Lucene62SegmentInfoFormat.VERSION_START, Lucene62SegmentInfoFormat.VERSION_CURRENT, segmentID, "");
            // Version is stored as three raw ints: major, minor, bugfix.
            final Version version = Version.fromBits(input.readInt(), input.readInt(), input.readInt());
            final int docCount = input.readInt();
            if (docCount < 0) {
                throw new CorruptIndexException("invalid docCount: " + docCount, input);
            }
            final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
            final Map<String, String> diagnostics = input.readMapOfStrings();
            final Set<String> files = input.readSetOfStrings();
            final Map<String, String> attributes = input.readMapOfStrings();
            // 0 sort fields means "no index sort"; negative is corruption.
            int numSortFields = input.readVInt();
            Sort indexSort;
            if (numSortFields > 0) {
                SortField[] sortFields = new SortField[numSortFields];
                for (int i = 0; i < numSortFields; i++) {
                    String fieldName = input.readString();
                    int sortTypeID = input.readVInt();
                    SortField.Type sortType;
                    SortedSetSelector.Type sortedSetSelector = null;
                    SortedNumericSelector.Type sortedNumericSelector = null;
                    // IDs 0-4 are plain sort types; 5 is a SortedSet (multi-valued string)
                    // sort carrying a selector byte; 6 is a SortedNumeric sort carrying a
                    // numeric-type byte followed by a selector byte.
                    switch(sortTypeID) {
                        case 0 ->
                            sortType = SortField.Type.STRING;
                        case 1 ->
                            sortType = SortField.Type.LONG;
                        case 2 ->
                            sortType = SortField.Type.INT;
                        case 3 ->
                            sortType = SortField.Type.DOUBLE;
                        case 4 ->
                            sortType = SortField.Type.FLOAT;
                        case 5 ->
                            {
                                sortType = SortField.Type.STRING;
                                byte selector = input.readByte();
                                if (selector == 0) {
                                    sortedSetSelector = SortedSetSelector.Type.MIN;
                                } else if (selector == 1) {
                                    sortedSetSelector = SortedSetSelector.Type.MAX;
                                } else if (selector == 2) {
                                    sortedSetSelector = SortedSetSelector.Type.MIDDLE_MIN;
                                } else if (selector == 3) {
                                    sortedSetSelector = SortedSetSelector.Type.MIDDLE_MAX;
                                } else {
                                    throw new CorruptIndexException("invalid index SortedSetSelector ID: " + selector, input);
                                }
                            }
                        case 6 ->
                            {
                                byte type = input.readByte();
                                if (type == 0) {
                                    sortType = SortField.Type.LONG;
                                } else if (type == 1) {
                                    sortType = SortField.Type.INT;
                                } else if (type == 2) {
                                    sortType = SortField.Type.DOUBLE;
                                } else if (type == 3) {
                                    sortType = SortField.Type.FLOAT;
                                } else {
                                    throw new CorruptIndexException("invalid index SortedNumericSortField type ID: " + type, input);
                                }
                                byte numericSelector = input.readByte();
                                if (numericSelector == 0) {
                                    sortedNumericSelector = SortedNumericSelector.Type.MIN;
                                } else if (numericSelector == 1) {
                                    sortedNumericSelector = SortedNumericSelector.Type.MAX;
                                } else {
                                    throw new CorruptIndexException("invalid index SortedNumericSelector ID: " + numericSelector, input);
                                }
                            }
                        default ->
                            throw new CorruptIndexException("invalid index sort field type ID: " + sortTypeID, input);
                    }
                    // Reverse flag: on disk, 0 means reversed and 1 means natural order.
                    byte b = input.readByte();
                    boolean reverse;
                    if (b == 0) {
                        reverse = true;
                    } else if (b == 1) {
                        reverse = false;
                    } else {
                        throw new CorruptIndexException("invalid index sort reverse: " + b, input);
                    }
                    // The non-null selector tells us which concrete SortField subclass was written.
                    if (sortedSetSelector != null) {
                        sortFields[i] = new SortedSetSortField(fieldName, reverse, sortedSetSelector);
                    } else if (sortedNumericSelector != null) {
                        sortFields[i] = new SortedNumericSortField(fieldName, sortType, reverse, sortedNumericSelector);
                    } else {
                        sortFields[i] = new SortField(fieldName, sortType, reverse);
                    }
                    // Missing-value flag byte: 0 = none; otherwise interpretation depends on sortType.
                    Object missingValue;
                    b = input.readByte();
                    if (b == 0) {
                        missingValue = null;
                    } else {
                        switch(sortType) {
                            case STRING:
                                // For strings the flag itself encodes the sentinel (last/first).
                                if (b == 1) {
                                    missingValue = SortField.STRING_LAST;
                                } else if (b == 2) {
                                    missingValue = SortField.STRING_FIRST;
                                } else {
                                    throw new CorruptIndexException("invalid missing value flag: " + b, input);
                                }
                                break;
                            case LONG:
                                if (b != 1) {
                                    throw new CorruptIndexException("invalid missing value flag: " + b, input);
                                }
                                missingValue = input.readLong();
                                break;
                            case INT:
                                if (b != 1) {
                                    throw new CorruptIndexException("invalid missing value flag: " + b, input);
                                }
                                missingValue = input.readInt();
                                break;
                            case DOUBLE:
                                // Doubles/floats are stored as their raw IEEE-754 bit patterns.
                                if (b != 1) {
                                    throw new CorruptIndexException("invalid missing value flag: " + b, input);
                                }
                                missingValue = Double.longBitsToDouble(input.readLong());
                                break;
                            case FLOAT:
                                if (b != 1) {
                                    throw new CorruptIndexException("invalid missing value flag: " + b, input);
                                }
                                missingValue = Float.intBitsToFloat(input.readInt());
                                break;
                            default:
                                throw new AssertionError("unhandled sortType=" + sortType);
                        }
                    }
                    if (missingValue != null) {
                        sortFields[i].setMissingValue(missingValue);
                    }
                }
                indexSort = new Sort(sortFields);
            } else if (numSortFields < 0) {
                throw new CorruptIndexException("invalid index sort field count: " + numSortFields, input);
            } else {
                indexSort = null;
            }
            si = new SegmentInfo(dir, version, null, segment, docCount, isCompoundFile, false, null, diagnostics, segmentID, attributes, indexSort);
            si.setFiles(files);
        } catch (Throwable exception) {
            // Defer the original failure so checkFooter can wrap it with checksum context.
            priorE = exception;
        } finally {
            CodecUtil.checkFooter(input, priorE);
        }
        return si;
    }
}
206647.8512213elasticsearch
/**
 * End-to-end test of mounting a searchable snapshot: creates and populates an index,
 * snapshots it to an "fs" repository, mounts the snapshot as a new index (with several
 * randomized cache/replica/tier settings), then verifies settings, hit counts, aliases,
 * recovery/cache stats, survival across a full cluster restart, shard relocation, and
 * finally cloning the mounted index back to a regular index.
 */
public void testCreateAndRestoreSearchableSnapshot() throws Exception {
    final String fsRepoName = randomAlphaOfLength(10);
    final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final String aliasName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    // Randomly mount under the same name or a fresh one; alias handling below depends on this.
    final String restoredIndexName = randomBoolean() ? indexName : randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createRepository(fsRepoName, "fs", Settings.builder().put("location", randomRepoPath()).put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
    final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true);
    if (randomBoolean()) {
        originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false");
    }
    assertAcked(prepareCreate(indexName, originalIndexSettings));
    assertAcked(indicesAdmin().prepareAliases().addAlias(indexName, aliasName));
    populateIndex(indexName, 10_000);
    // Capture baseline hit counts (all docs + a "foo:bar" subset) to compare after each restore step.
    final TotalHits originalAllHits = SearchResponseUtils.getTotalHits(internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true));
    final TotalHits originalBarHits = SearchResponseUtils.getTotalHits(internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")));
    logger.info("--> [{}] in total, of which [{}] match the query", originalAllHits, originalBarHits);
    // Stats API must reject a cluster with no searchable-snapshot index yet.
    expectThrows(ResourceNotFoundException.class, "Searchable snapshot stats on a non snapshot searchable index should fail", () -> client().execute(SearchableSnapshotsStatsAction.INSTANCE, new SearchableSnapshotsStatsRequest()).actionGet());
    final SnapshotInfo snapshotInfo = createFullSnapshot(fsRepoName, snapshotName);
    ensureGreen(indexName);
    assertShardFolders(indexName, false);
    assertThat(clusterAdmin().prepareState().clear().setMetadata(true).setIndices(indexName).get().getState().metadata().index(indexName).getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN));
    // Either delete or close the source index before mounting; affects alias expectations later.
    final boolean deletedBeforeMount = randomBoolean();
    if (deletedBeforeMount) {
        assertAcked(indicesAdmin().prepareDelete(indexName));
    } else {
        assertAcked(indicesAdmin().prepareClose(indexName));
    }
    final boolean cacheEnabled = randomBoolean();
    logger.info("--> restoring index [{}] with cache [{}]", restoredIndexName, cacheEnabled ? "enabled" : "disabled");
    Settings.Builder indexSettingsBuilder = Settings.builder().put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), cacheEnabled);
    boolean preWarmEnabled = false;
    if (cacheEnabled) {
        // Prewarm only makes sense with the cache on.
        preWarmEnabled = randomBoolean();
        indexSettingsBuilder.put(SearchableSnapshots.SNAPSHOT_CACHE_PREWARM_ENABLED_SETTING.getKey(), preWarmEnabled);
    }
    // Optionally exclude some Lucene file extensions from caching; verified in stats assertions.
    final List<String> nonCachedExtensions;
    if (randomBoolean()) {
        nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim"));
        indexSettingsBuilder.putList(SearchableSnapshots.SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING.getKey(), nonCachedExtensions);
    } else {
        nonCachedExtensions = Collections.emptyList();
    }
    if (randomBoolean()) {
        indexSettingsBuilder.put(SearchableSnapshots.SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(randomLongBetween(10, 100_000)));
    }
    // Replica count on the mounted index: explicit random value or the default of 0.
    final int expectedReplicas;
    if (randomBoolean()) {
        expectedReplicas = numberOfReplicas();
        indexSettingsBuilder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, expectedReplicas);
    } else {
        expectedReplicas = 0;
    }
    // Tier preference: random non-frozen subset, or the FULL_COPY default.
    final String expectedDataTiersPreference;
    if (randomBoolean()) {
        expectedDataTiersPreference = String.join(",", randomSubsetOf(DataTier.ALL_DATA_TIERS.stream().filter(tier -> tier.equals(DataTier.DATA_FROZEN) == false).collect(Collectors.toSet())));
        indexSettingsBuilder.put(DataTier.TIER_PREFERENCE, expectedDataTiersPreference);
    } else {
        expectedDataTiersPreference = MountSearchableSnapshotRequest.Storage.FULL_COPY.defaultDataTiersPreference();
    }
    final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(restoredIndexName, fsRepoName, snapshotInfo.snapshotId().getName(), indexName, indexSettingsBuilder.build(), Strings.EMPTY_ARRAY, true, MountSearchableSnapshotRequest.Storage.FULL_COPY);
    final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
    final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(fsRepoName).get().repositories().get(0);
    assertThat(repositoryMetadata.name(), equalTo(fsRepoName));
    assertThat(repositoryMetadata.uuid(), not(equalTo(RepositoryData.MISSING_UUID)));
    // Verify the mounted index carries the expected searchable-snapshot settings.
    final Settings settings = indicesAdmin().prepareGetSettings(restoredIndexName).get().getIndexToSettings().get(restoredIndexName);
    assertThat(SearchableSnapshots.SNAPSHOT_REPOSITORY_UUID_SETTING.get(settings), equalTo(repositoryMetadata.uuid()));
    assertThat(SearchableSnapshots.SNAPSHOT_REPOSITORY_NAME_SETTING.get(settings), equalTo(fsRepoName));
    assertThat(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.get(settings), equalTo(snapshotName));
    assertThat(IndexModule.INDEX_STORE_TYPE_SETTING.get(settings), equalTo(SEARCHABLE_SNAPSHOT_STORE_TYPE));
    assertThat(IndexModule.INDEX_RECOVERY_TYPE_SETTING.get(settings), equalTo(SNAPSHOT_RECOVERY_STATE_FACTORY_KEY));
    assertTrue(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(settings));
    assertTrue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.exists(settings));
    assertTrue(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.exists(settings));
    assertThat(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(settings).toString(), equalTo("false"));
    assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings), equalTo(expectedReplicas));
    assertThat(DataTier.TIER_PREFERENCE_SETTING.get(settings), equalTo(expectedDataTiersPreference));
    assertThat(IndexSettings.INDEX_CHECK_ON_STARTUP.get(settings), equalTo("false"));
    checkSoftDeletesNotEagerlyLoaded(restoredIndexName);
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, preWarmEnabled);
    assertSearchableSnapshotStats(restoredIndexName, cacheEnabled, nonCachedExtensions);
    ensureGreen(restoredIndexName);
    assertBusy(() -> assertShardFolders(restoredIndexName, true), 30, TimeUnit.SECONDS);
    assertThat(clusterAdmin().prepareState().clear().setMetadata(true).setIndices(restoredIndexName).get().getState().metadata().index(restoredIndexName).getTimestampRange(), sameInstance(IndexLongFieldRange.UNKNOWN));
    // Re-point the alias at the mounted index: if the source was deleted the alias is gone
    // and must be recreated; if it was only closed and the name differs, move the alias over.
    if (deletedBeforeMount) {
        assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(0));
        assertAcked(indicesAdmin().prepareAliases().addAlias(restoredIndexName, aliasName));
    } else if (indexName.equals(restoredIndexName) == false) {
        assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(1));
        assertAcked(indicesAdmin().prepareAliases().addAliasAction(IndicesAliasesRequest.AliasActions.remove().index(indexName).alias(aliasName).mustExist(true)).addAlias(restoredIndexName, aliasName));
    }
    assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(1));
    assertTotalHits(aliasName, originalAllHits, originalBarHits);
    // The mounted index must survive a full cluster restart with identical hit counts.
    internalCluster().fullRestart();
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, preWarmEnabled);
    assertTotalHits(aliasName, originalAllHits, originalBarHits);
    assertSearchableSnapshotStats(restoredIndexName, cacheEnabled, nonCachedExtensions);
    // Force relocation onto a specific data node, then verify again.
    internalCluster().ensureAtLeastNumDataNodes(2);
    final DiscoveryNode dataNode = randomFrom(clusterAdmin().prepareState().get().getState().nodes().getDataNodes().values());
    updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNode.getName()), restoredIndexName);
    assertFalse(clusterAdmin().prepareHealth(restoredIndexName).setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).get().isTimedOut());
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, preWarmEnabled);
    assertSearchableSnapshotStats(restoredIndexName, cacheEnabled, nonCachedExtensions);
    updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).putNull(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()), restoredIndexName);
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    assertRecoveryStats(restoredIndexName, preWarmEnabled);
    // Clone the mounted index into a regular index (store/recovery settings removed) and
    // confirm the clone carries no searchable-snapshot settings but the same data.
    final String clonedIndexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    assertAcked(indicesAdmin().prepareResizeIndex(restoredIndexName, clonedIndexName).setResizeType(ResizeType.CLONE).setSettings(Settings.builder().putNull(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()).putNull(IndexModule.INDEX_RECOVERY_TYPE_SETTING.getKey()).build()));
    ensureGreen(clonedIndexName);
    assertTotalHits(clonedIndexName, originalAllHits, originalBarHits);
    final Settings clonedIndexSettings = indicesAdmin().prepareGetSettings(clonedIndexName).get().getIndexToSettings().get(clonedIndexName);
    assertFalse(clonedIndexSettings.hasValue(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_REPOSITORY_NAME_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.getKey()));
    assertFalse(clonedIndexSettings.hasValue(IndexModule.INDEX_RECOVERY_TYPE_SETTING.getKey()));
    // Deleting the mounted index drops its alias; re-point it at the clone and verify once more.
    assertAcked(indicesAdmin().prepareDelete(restoredIndexName));
    assertThat(indicesAdmin().prepareGetAliases(aliasName).get().getAliases().size(), equalTo(0));
    assertAcked(indicesAdmin().prepareAliases().addAlias(clonedIndexName, aliasName));
    assertTotalHits(aliasName, originalAllHits, originalBarHits);
}
207070.291247gwt
/**
 * Exhaustive table-driven check that double-to-bits conversion matches the JVM's IEEE-754
 * encoding. Each call pairs an expected 64-bit pattern with the double literal it encodes;
 * {@code compareDoubleBits} (defined elsewhere in this class) performs the comparison.
 * Cases cover signed zeros, NaN, infinities, values adjacent to 1.0/2.0, extreme normals,
 * the full subnormal range down to MIN_VALUE, and many arbitrary mantissa patterns.
 */
public void testDoubleBits() {
    // Signed zeros and special values.
    compareDoubleBits(0x0000000000000000L, 0.0);
    compareDoubleBits(0x8000000000000000L, -0.0);
    compareDoubleBits(0x8000000000000000L, 1.0 / Double.NEGATIVE_INFINITY);
    compareDoubleBits(0x7ff8000000000000L, Double.NaN);
    compareDoubleBits(0x7ff0000000000000L, Double.POSITIVE_INFINITY);
    compareDoubleBits(0xfff0000000000000L, Double.NEGATIVE_INFINITY);
    // Values at and around 1.0 and 2.0 (exponent boundaries).
    compareDoubleBits(0x3feffffffffffff7L, 0.999999999999999);
    compareDoubleBits(0x3ff0000000000000L, 1.0);
    compareDoubleBits(0x3ff0000000000005L, 1.000000000000001);
    compareDoubleBits(0x3ffffffffffffffbL, 1.999999999999999);
    compareDoubleBits(0x4000000000000000L, 2.0);
    compareDoubleBits(0x4000000000000002L, 2.000000000000001);
    // Common non-exact decimals and large/small magnitudes.
    compareDoubleBits(0x3fb999999999999aL, 0.1);
    compareDoubleBits(0xbfb999999999999aL, -0.1);
    compareDoubleBits(0x017527e6d48c1653L, 0.1234e-300);
    compareDoubleBits(0x817527e6d48c1653L, -0.1234e-300);
    compareDoubleBits(0x7e0795f2d9000b3fL, 0.1234e300);
    compareDoubleBits(0xfe0795f2d9000b3fL, -0.1234e300);
    compareDoubleBits(0x3fc999999999999aL, 0.2);
    compareDoubleBits(0x4272c3598dd61e72L, 1289389399393.902892);
    compareDoubleBits(0x405edd3c07ee0b0bL, 123.456789);
    compareDoubleBits(0xc05edd3c07ee0b0bL, -123.456789);
    // Extremes: largest/smallest normals, then the subnormal range down to MIN_VALUE.
    compareDoubleBits(0x7fefffffffffffffL, 1.7976931348623157E308);
    compareDoubleBits(0xffefffffffffffffL, -1.7976931348623157E308);
    compareDoubleBits(0x0010000000000000L, 2.2250738585072014E-308);
    compareDoubleBits(0x8010000000000000L, -2.2250738585072014E-308);
    compareDoubleBits(0x000ff6a8ebe79958L, 2.22E-308);
    compareDoubleBits(0x000199999999999aL, 2.2250738585072014E-309);
    compareDoubleBits(0x800016b9f3c0e51dL, -1.234567E-310);
    compareDoubleBits(0x000016b9f3c0e51dL, 1.234567E-310);
    compareDoubleBits(0x00000245cb934a1cL, 1.234567E-311);
    compareDoubleBits(0x0000003a2df52103L, 1.234567E-312);
    compareDoubleBits(0x00000005d165501aL, 1.234567E-313);
    compareDoubleBits(0x0000000094f08803L, 1.234567E-314);
    compareDoubleBits(0x000000000ee4da67L, 1.234567E-315);
    compareDoubleBits(0x00000000017d490aL, 1.234567E-316);
    compareDoubleBits(0x00000000002620e7L, 1.234567E-317);
    compareDoubleBits(0x000000000003d017L, 1.234567E-318);
    compareDoubleBits(0x000000000000619cL, 1.234567E-319);
    compareDoubleBits(0x00000000000009c3L, 1.234567E-320);
    compareDoubleBits(0x00000000000000faL, 1.234567E-321);
    compareDoubleBits(0x0000000000000019L, 1.234567E-322);
    compareDoubleBits(0x0000000000000002L, 1.234567E-323);
    compareDoubleBits(0x0000000000000001L, 4.9E-324);
    compareDoubleBits(0x8000000000000001L, -4.9E-324);
    // Arbitrary fractions in (0, 1) exercising varied mantissa bit patterns.
    compareDoubleBits(0x3fe9b9bcd3c39dabL, 0.8039230476396616);
    compareDoubleBits(0x3fe669d4a374efc4L, 0.700418776752024);
    compareDoubleBits(0x3fd92b7ca312ca7eL, 0.39327922749649946);
    compareDoubleBits(0x3fbc74aa296b7e18L, 0.11115516196468211);
    compareDoubleBits(0x3feea888cdfcb13dL, 0.95807304603435);
    compareDoubleBits(0x3fd88b23cfa7eadaL, 0.3834924247636714);
    compareDoubleBits(0x3fd62865167eb9bcL, 0.3462155074766107);
    compareDoubleBits(0x3fe5772b57e62b3fL, 0.6707970349101301);
    compareDoubleBits(0x3fbd09988fb96be0L, 0.11342767247099017);
    compareDoubleBits(0x3fb643296d7fa050L, 0.08696230815223882);
    compareDoubleBits(0x3fde7f76986c1bd4L, 0.4765297401904125);
    compareDoubleBits(0x3fef4b4433f8efacL, 0.9779377951704098);
    compareDoubleBits(0x3fd374e530a19278L, 0.3040097212708939);
    compareDoubleBits(0x3fc17adf98fc3368L, 0.1365622994420861);
    compareDoubleBits(0x3fd6beb0e5a5055aL, 0.355388855230634);
    compareDoubleBits(0x3fc3d8128b76ba20L, 0.1550315075850941);
    compareDoubleBits(0x3fc47c5027f58900L, 0.16004373503808011);
    compareDoubleBits(0x3fe0ba6a91eeb5a7L, 0.522755894684157);
    compareDoubleBits(0x3fe68f019034c7b9L, 0.704956800129586);
    compareDoubleBits(0x3fe3990dbaf329c4L, 0.6124333049167991);
    compareDoubleBits(0x3faded6423ccf8f0L, 0.058451775903891945);
    compareDoubleBits(0x3fe51aebf7ee4537L, 0.6595363466641747);
    compareDoubleBits(0x3fe937bb75080f7dL, 0.7880532537242143);
    compareDoubleBits(0x3fc693d447054de4L, 0.17638638942535956);
    compareDoubleBits(0x3fd95091de22548eL, 0.39554259007247417);
    compareDoubleBits(0x3fe93b21f50b1a41L, 0.788468340492564);
    compareDoubleBits(0x3fd77d9f7da868b8L, 0.36704242011331756);
    compareDoubleBits(0x3fcb8abae3f1c05cL, 0.2151712048539619);
    compareDoubleBits(0x3feec9ed025ddcf3L, 0.9621491476272283);
    compareDoubleBits(0x3fda1ac9bf0e59c0L, 0.4078850141317183);
    compareDoubleBits(0x3fe66e66602de93eL, 0.7009765509131183);
    compareDoubleBits(0x3fe6da2963aecb21L, 0.714131064122146);
    compareDoubleBits(0x3fb306bb648e4ae0L, 0.07432147221542662);
    compareDoubleBits(0x3fd06b9877b9b50eL, 0.25656711284575884);
    compareDoubleBits(0x3fce870599f3a28cL, 0.2384955407826691);
    compareDoubleBits(0x3fe14a5c59d8ce59L, 0.5403272394964446);
    compareDoubleBits(0x3fb118ac2dc6a700L, 0.06678272359445359);
    compareDoubleBits(0x3fafb0e23ecbc770L, 0.06189639107277756);
    compareDoubleBits(0x3fe4475a31a9723aL, 0.633710000034234);
    compareDoubleBits(0x3fdd5e0f4a0296f6L, 0.4588659498934783);
    compareDoubleBits(0x3fefbc13bb0b2b44L, 0.991708627051914);
    compareDoubleBits(0x3fde5c601db8b162L, 0.4743881502388573);
    compareDoubleBits(0x3fdda64289b8cde6L, 0.4632726998272275);
    compareDoubleBits(0x3fea18660f99c86bL, 0.8154783539487317);
    compareDoubleBits(0x3fec39460d8a8808L, 0.8819914116359096);
    compareDoubleBits(0x3fd6a29437ecfad4L, 0.3536730333470761);
    compareDoubleBits(0x3fe1c31fcb975395L, 0.5550688721074147);
    compareDoubleBits(0x3fc784448f1277b0L, 0.18372399316734578);
    compareDoubleBits(0x3fe78d52a1f7d63cL, 0.7360013163985921);
    compareDoubleBits(0x3feb0d9bee281702L, 0.8454112674232592);
    compareDoubleBits(0x3fc382ec2f0ee738L, 0.15243294046177325);
    compareDoubleBits(0x3fe616577bf4b8d5L, 0.6902272625937039);
    compareDoubleBits(0x3fdd6ffcb6caedacL, 0.4599601540646414);
    compareDoubleBits(0x3fdfa267b07ca0e4L, 0.49428741679231636);
    compareDoubleBits(0x3fcdc3688fcb9f34L, 0.23252589246043842);
    compareDoubleBits(0x3fc6bd1204233708L, 0.1776449699595377);
    compareDoubleBits(0x3fd75236cfc8fafeL, 0.364392950930707);
    compareDoubleBits(0x3fef34680bd4ce47L, 0.9751472693519155);
    compareDoubleBits(0x3fc634b5d386b93cL, 0.17348358944350106);
    compareDoubleBits(0x3feaf69abdedcf4bL, 0.8426030835675901);
    compareDoubleBits(0x3fdcf973748a67e0L, 0.45272528057978256);
    compareDoubleBits(0x3fec8f6155ecd410L, 0.8925024679398366);
    compareDoubleBits(0x3fe3e8d8466d453aL, 0.6221734405063792);
    compareDoubleBits(0x3fdfa7ff50fced6aL, 0.4946287432573714);
    compareDoubleBits(0x3fe536d9d49d33beL, 0.6629456665628977);
    compareDoubleBits(0x3fdfdff0e8e048aeL, 0.4980432771855118);
    compareDoubleBits(0x3feb4abc3a80aeacL, 0.8528729574804479);
    compareDoubleBits(0x3fbf44d011fd7950L, 0.12214374961101737);
    compareDoubleBits(0x3fdb59c21a7ecd6aL, 0.4273534067862871);
    compareDoubleBits(0x3fbb4128fb635888L, 0.10646301400569957);
    compareDoubleBits(0x3fc03e9c906fa23cL, 0.12691075375120586);
    compareDoubleBits(0x3f976c3738766d00L, 0.022873747655109078);
    compareDoubleBits(0x3fd9a62096187b4eL, 0.4007646051194812);
    compareDoubleBits(0x3fdcea7def3c0528L, 0.45181225168933503);
    compareDoubleBits(0x3fe20ea23d703cb2L, 0.5642863464326153);
    compareDoubleBits(0x3fd6f2c3edcf5bf4L, 0.35856722091324333);
    compareDoubleBits(0x3fef2c3c9e7a6dc0L, 0.9741499991682119);
    compareDoubleBits(0x3fcc2142c7ab8c28L, 0.21976504086987458);
    compareDoubleBits(0x3fea41a3e626ff58L, 0.8205127234614151);
    compareDoubleBits(0x3fe4162d28c9abe8L, 0.6277070805202785);
    compareDoubleBits(0x3fce8826a9ca117cL, 0.23852999964232058);
    compareDoubleBits(0x3fe07fbd24b88c67L, 0.5155931203083924);
    compareDoubleBits(0x3fdb39c66484189aL, 0.4254013043977324);
    compareDoubleBits(0x3fcb830d50fac7b0L, 0.21493689016420836);
    compareDoubleBits(0x3fd927ccb62342c8L, 0.3930541781128736);
    compareDoubleBits(0x3fb553b2448dd6d8L, 0.08330835508044332);
    compareDoubleBits(0x3fef870d8a9f527bL, 0.9852359492748087);
    compareDoubleBits(0x3febe929c4bac429L, 0.8722122995733389);
    compareDoubleBits(0x3fc9cc2d6286a01cL, 0.20154349623521817);
    compareDoubleBits(0x3fe5b506615ab5c6L, 0.6783477689220312);
    compareDoubleBits(0x3fe26c9e0a02bdfeL, 0.5757589526673994);
    compareDoubleBits(0x3fe6daf54806b05cL, 0.7142282873878787);
    compareDoubleBits(0x3fefc9bbb28f362eL, 0.9933756339539224);
    compareDoubleBits(0x3fbd455782e84968L, 0.1143393225286552);
    compareDoubleBits(0x3fe1f09744a3d0b7L, 0.5606190052626719);
    compareDoubleBits(0x3fb1c833b64b5470L, 0.06946109009307277);
    compareDoubleBits(0x3fec140750dfb23bL, 0.8774448947493235);
    compareDoubleBits(0x3fc3b30746f5e6bcL, 0.15390101399298384);
    compareDoubleBits(0x3fe844deac963963L, 0.7584069605671114);
    compareDoubleBits(0x3fd45d6e91a9989eL, 0.31820263123371173);
    // Arbitrary values spanning the full exponent range, both signs.
    compareDoubleBits(0xcdcde6aa7873b572L, -6.297893811982062E66);
    compareDoubleBits(0xb34ea52b6e9df882L, -1.4898867990306772E-61);
    compareDoubleBits(0x64adf2aa312ca7e1L, 9.480996430600118E176);
    compareDoubleBits(0x1c74aa06a5adf865L, 1.3367811675349397E-171);
    compareDoubleBits(0xf5444671bf9627b5L, -7.610810186261922E256);
    compareDoubleBits(0x622c8f3efa7eadb0L, 8.223166382138422E164);
    compareDoubleBits(0x58a1947167eb9bccL, 8.86632413276402E118);
    compareDoubleBits(0xabb95a84fcc567f2L, -4.6366137067352683E-98);
    compareDoubleBits(0x1d0998843ee5af9aL, 8.477749983935152E-169);
    compareDoubleBits(0x1643296db5fe8140L, 1.9557345103545524E-201);
    compareDoubleBits(0x79fdda7486c1bd5fL, 4.2335912871234087E279);
    compareDoubleBits(0xfa5a219d7f1df591L, -2.3716857301453343E281);
    compareDoubleBits(0x4dd394c40a192798L, 8.248528934271815E66);
    compareDoubleBits(0x22f5bf1fc7e19b49L, 2.853336355041256E-140);
    compareDoubleBits(0x5afac3aa5a5055b7L, 1.8552153438817665E130);
    compareDoubleBits(0x27b025195bb5d113L, 1.6005805986130906E-117);
    compareDoubleBits(0x28f8a05a3fac4815L, 2.5600128085455218E-111);
    compareDoubleBits(0x85d354a03dd6b4f3L, -1.3311552586579328E-280);
    compareDoubleBits(0xb4780c890698f727L, -6.129954074000813E-56);
    compareDoubleBits(0x9cc86de85e65389aL, -5.057128159830759E-170);
    compareDoubleBits(0x0ef6b2304799f1f0L, 1.3941634112607635E-236);
    compareDoubleBits(0xa8d75f9ffdc8a6fcL, -6.0744368561933565E-112);
    compareDoubleBits(0xc9bddb8fa101efb6L, -1.7045710739729275E47);
    compareDoubleBits(0x2d27a89b382a6f20L, 3.6294490400055576E-91);
    compareDoubleBits(0x65424775e22548f8L, 5.925748939163494E179);
    compareDoubleBits(0xc9d90fb1a1634836L, -5.722990169624538E47);
    compareDoubleBits(0x5df67dbfda868b9eL, 4.3882436353810935E144);
    compareDoubleBits(0x371575e91f8e02eeL, 2.4058151686870744E-43);
    compareDoubleBits(0xf64f681f4bbb9e72L, -7.726253195850651E261);
    compareDoubleBits(0x686b26cff0e59c06L, 9.910208820012368E194);
    compareDoubleBits(0xb373331d05bd27ceL, -7.467486715472904E-61);
    compareDoubleBits(0xb6d14b2b75d96425L, -1.211676991363774E-44);
    compareDoubleBits(0x1306bb6092392b97L, 5.1516895067444726E-217);
    compareDoubleBits(0x41ae61d47b9b50e7L, 2.548639338033516E8);
    compareDoubleBits(0x3d0e0b31cf9d1469L, 1.3342095786138988E-14);
    compareDoubleBits(0x8a52e2e03b19cb38L, -6.141707139267237E-259);
    compareDoubleBits(0x1118ac38b71a9c13L, 2.603758248081968E-226);
    compareDoubleBits(0x0fd871017d978ee5L, 2.459857504627046E-232);
    compareDoubleBits(0xa23ad18a352e4745L, -8.590863325618324E-144);
    compareDoubleBits(0x75783d29a0296f70L, 7.278962913424277E257);
    compareDoubleBits(0xfde09de561656888L, -2.1734648688870485E298);
    compareDoubleBits(0x79718077db8b1623L, 9.695260031659742E276);
    compareDoubleBits(0x76990a379b8cde6dL, 1.971192366484015E263);
    compareDoubleBits(0xd0c3304ff3390d67L, -1.1376138095726657E81);
    compareDoubleBits(0xe1ca3067b1510109L, -1.1782242328468048E163);
    compareDoubleBits(0x5a8a50c47ecfad55L, 1.425081729881992E128);
    compareDoubleBits(0x8e18fe5072ea72a0L, -9.37063623070425E-241);
    compareDoubleBits(0x2f08893a7893bd8dL, 4.0416231167192477E-82);
    compareDoubleBits(0xbc6a95093efac79fL, -1.1528179597119854E-17);
    compareDoubleBits(0xd86cdf4ec502e047L, -9.101010985774022E117);
    compareDoubleBits(0x2705d86b787739cfL, 1.0574785023773003E-120);
    compareDoubleBits(0xb0b2bbf87e971aa7L, -4.141881119170908E-74);
    compareDoubleBits(0x75bff2d46caedac7L, 1.5350675559647406E259);
    compareDoubleBits(0x7e899eea07ca0e54L, 3.4316079097255617E301);
    compareDoubleBits(0x3b86d1277e5cf9a3L, 6.039611960807865E-22);
    compareDoubleBits(0x2d7a243c2119b84aL, 1.2833127546095387E-89);
    compareDoubleBits(0x5d48db16fc8faff2L, 2.3679693140335752E141);
    compareDoubleBits(0xf9a340497a99c8efL, -8.531434157588162E277);
    compareDoubleBits(0x2c696ba49c35c9f4L, 9.520836999385529E-95);
    compareDoubleBits(0xd7b4d5dabdb9e964L, -3.206856786877124E114);
    compareDoubleBits(0x73e5cde748a67e04L, 1.9514110919532497E250);
    compareDoubleBits(0xe47b0a84bd9a821eL, -1.0700933026823467E176);
    compareDoubleBits(0x9f46c209cda8a753L, -5.179951037444452E-158);
    compareDoubleBits(0x7e9ffd4f0fced6b4L, 8.569252835514251E301);
    compareDoubleBits(0xa9b6ce9893a677d6L, -9.711135665099199E-108);
    compareDoubleBits(0x7f7fc3978e048af9L, 1.3940913327465408E306);
    compareDoubleBits(0xda55e1fe5015d581L, -1.4812924721906427E127);
    compareDoubleBits(0x1f44d03847f5e54dL, 4.737338914555982E-158);
    compareDoubleBits(0x6d670848a7ecd6b5L, 1.016307848543568E219);
    compareDoubleBits(0x1b4128e3ed8d6230L, 2.117302740808736E-177);
    compareDoubleBits(0x207d392f837d11f6L, 3.4873269549337505E-152);
    compareDoubleBits(0x05db0dd638766d17L, 1.8630149420804414E-280);
    compareDoubleBits(0x669882566187b4e0L, 1.6662694932683939E186);
    compareDoubleBits(0x73a9f7acf3c05283L, 1.452500477811838E249);
    compareDoubleBits(0x907511ceae07965bL, -2.1713947206680943E-229);
    compareDoubleBits(0x5bcb0f90dcf5bf4eL, 1.5366281556927236E134);
    compareDoubleBits(0xf961e4e2cf4db80dL, -4.956276753857123E276);
    compareDoubleBits(0x3842859a3d5c6147L, 1.0886185451514296E-37);
    compareDoubleBits(0xd20d1f11c4dfeb10L, -1.8103414291013452E87);
    compareDoubleBits(0xa0b1697219357d01L, -3.32451488002257E-151);
    compareDoubleBits(0x3d104d584e508be4L, 1.44791988988344E-14);
    compareDoubleBits(0x83fde93797118ce8L, -1.9182940419362316E-289);
    compareDoubleBits(0x6ce719b2484189a7L, 3.9816763934214935E216);
    compareDoubleBits(0x37061aa687d63d94L, 1.238977854703573E-43);
    compareDoubleBits(0x649f32e062342c83L, 4.938493427207422E176);
    compareDoubleBits(0x1553b27712375b61L, 6.135160266378285E-206);
    compareDoubleBits(0xfc386c6b53ea4f67L, -2.3801480044660797E290);
    compareDoubleBits(0xdf494e029758853eL, -1.0354031977895994E151);
    compareDoubleBits(0x33985ae9143500f8L, 3.7890569411337005E-60);
    compareDoubleBits(0xada8332b2b56b8c5L, -9.503956824431546E-89);
    compareDoubleBits(0x9364f07f4057bfcaL, -3.0370938126636166E-215);
    compareDoubleBits(0xb6d7aa7b00d60b8cL, -1.6581522255623348E-44);
    compareDoubleBits(0xfe4ddda651e6c5caL, -2.500115798151869E300);
    compareDoubleBits(0x1d45578c0ba125baL, 1.1310118263643858E-167);
    compareDoubleBits(0x8f84ba31947a16ffL, -6.518932884318998E-234);
    compareDoubleBits(0x11c833a9d92d51dcL, 5.230715225679756E-223);
    compareDoubleBits(0xe0a03a901bf64771L, -2.7851790876803525E157);
    compareDoubleBits(0x27660eb637af35f9L, 6.833565753907854E-119);
    compareDoubleBits(0xc226f54e92c72c68L, -4.930242390758673E10);
    compareDoubleBits(0x5175ba4e1a9989e3L, 2.6381145200142355E84);
}
207023.321241gwt
/**
 * Verifies float bit-pattern round-tripping: each call checks that the given
 * 32-bit IEEE 754 pattern corresponds to the given {@code float} value
 * (presumably via Float.floatToIntBits / Float.intBitsToFloat inside
 * compareFloatBits — confirm against that helper's definition).
 * Cases cover subnormals, smallest normals, signed zeros, NaN, infinities,
 * exact powers of two, boundary values, and a large set of sampled patterns.
 */
public void testFloatBits() {
    // Positive subnormals: multiples of Float.MIN_VALUE (1.4e-45F).
    compareFloatBits(0x1, 1.401298464324817E-45F);
    compareFloatBits(0x2, 1.401298464324817E-45F * 2.0F);
    compareFloatBits(0x3, 1.401298464324817E-45F * 3.0F);
    compareFloatBits(0x00ba98, 1.401298464324817E-45F * 0x00ba98);
    compareFloatBits(8034422, 1.401298464324817E-45F * 8034422);
    compareFloatBits(0x7fffff, 1.401298464324817E-45F * 0x7fffff);
    // Negative subnormals: same magnitudes with the sign bit (0x80000000) set.
    compareFloatBits(0x80000001, -1.401298464324817E-45F);
    compareFloatBits(0x80000002, -1.401298464324817E-45F * 2.0F);
    compareFloatBits(0x80000003, -1.401298464324817E-45F * 3.0F);
    compareFloatBits(0x8000ba98, -1.401298464324817E-45F * 0x00ba98);
    compareFloatBits(0x807a9876, -1.401298464324817E-45F * 0x7a9876);
    compareFloatBits(0x807fffff, -1.401298464324817E-45F * 0x7fffff);
    // Smallest positive/negative normal numbers (around Float.MIN_NORMAL).
    compareFloatBits(0x00800000, 1.1754943508222875E-38F);
    compareFloatBits(0x00800001, 1.175494490952134E-38F);
    compareFloatBits(0x00801234, 1.176147355906663E-38F);
    compareFloatBits(0x80800000, -1.1754943508222875E-38F);
    compareFloatBits(0x80800001, -1.175494490952134E-38F);
    compareFloatBits(0x80801234, -1.176147355906663E-38F);
    // Signed zeros: -0.0F has only the sign bit set.
    compareFloatBits(0x0, 0.0F);
    compareFloatBits(0x80000000, -0.0F);
    compareFloatBits(0x80000000, 1.0F / Float.NEGATIVE_INFINITY);
    // Canonical NaN and the two infinities.
    compareFloatBits(0x7fc00000, Float.NaN);
    compareFloatBits(0x7f800000, Float.POSITIVE_INFINITY);
    compareFloatBits(0xff800000, Float.NEGATIVE_INFINITY);
    // Simple exact values and ULP neighbors around 1.0F and 2.0F.
    compareFloatBits(0x3f800000, 1.0F);
    compareFloatBits(0x40000000, 2.0F);
    compareFloatBits(0x3f7ffffe, 0.9999998807907104F);
    compareFloatBits(0x3f800001, 1.0000001192092896F);
    compareFloatBits(0x3fffffff, 1.9999998807907104F);
    compareFloatBits(0x40000000, 2.0F);
    // Values not exactly representable in binary (0.1, 0.2) plus a few fixtures.
    compareFloatBits(0x3dcccccd, 0.10000000149011612F);
    compareFloatBits(0xbdcccccd, -0.10000000149011612F);
    compareFloatBits(0x3e4ccccd, 0.20000000298023224F);
    compareFloatBits(0xbe4ccccd, -0.20000000298023224F);
    compareFloatBits(0x42f6e9e0, 123.456787109375F);
    compareFloatBits(0xc2f6e9e0, -123.456787109375F);
    // Largest finite magnitudes (Float.MAX_VALUE and its negation).
    compareFloatBits(0x7f7fffff, 3.4028234663852886E38F);
    compareFloatBits(0xff7fffff, -3.4028234663852886E38F);
    compareFloatBits(0x80000001, -1.401298464324817E-45F);
    // Sampled patterns in [0, 1): exercise a spread of mantissa/exponent combos.
    compareFloatBits(0x3e4cdcd4, 0.2000611424446106F);
    compareFloatBits(0x3f4ef68e, 0.8084496259689331F);
    compareFloatBits(0x3dd77088, 0.10519510507583618F);
    compareFloatBits(0x3e16156c, 0.14656609296798706F);
    compareFloatBits(0x3ea3776c, 0.3192704916000366F);
    compareFloatBits(0x3f510cbb, 0.816600501537323F);
    compareFloatBits(0x3ed6e3d6, 0.4197070002555847F);
    compareFloatBits(0x3f2209e6, 0.6329635381698608F);
    compareFloatBits(0x3f20fdd3, 0.6288730502128601F);
    compareFloatBits(0x3ecd6df2, 0.4012294411659241F);
    compareFloatBits(0x3f1a107a, 0.6018139123916626F);
    compareFloatBits(0x3f47e6d3, 0.7808658480644226F);
    compareFloatBits(0x3da82010, 0.08209240436553955F);
    compareFloatBits(0x3d1c0c20, 0.038097500801086426F);
    compareFloatBits(0x3f0adc42, 0.5424233675003052F);
    compareFloatBits(0x3f5fae9f, 0.8737582564353943F);
    compareFloatBits(0x3f4eba38, 0.8075289726257324F);
    compareFloatBits(0x3f23d86a, 0.6400209665298462F);
    compareFloatBits(0x3ea11a1a, 0.3146522641181946F);
    compareFloatBits(0x3e7a8824, 0.24465996026992798F);
    compareFloatBits(0x3ef758b2, 0.483098566532135F);
    compareFloatBits(0x3e8d1874, 0.275577187538147F);
    compareFloatBits(0x3dbc6968, 0.09199792146682739F);
    compareFloatBits(0x3e940d00, 0.28916168212890625F);
    compareFloatBits(0x3edd7ba2, 0.43258386850357056F);
    compareFloatBits(0x3edf10da, 0.4356754422187805F);
    compareFloatBits(0x3e9a3f84, 0.3012658357620239F);
    compareFloatBits(0x3f21db08, 0.6322484016418457F);
    compareFloatBits(0x3f10f0c8, 0.5661740303039551F);
    compareFloatBits(0x3f7b5bc9, 0.9818692803382874F);
    compareFloatBits(0x3f786c68, 0.9704041481018066F);
    compareFloatBits(0x3f3b3106, 0.7312167882919312F);
    compareFloatBits(0x3eef40e6, 0.46729201078414917F);
    compareFloatBits(0x3f2120ea, 0.6294084787368774F);
    compareFloatBits(0x3ece201c, 0.40258872509002686F);
    compareFloatBits(0x3f26e082, 0.6518632173538208F);
    compareFloatBits(0x3e1edd60, 0.15514135360717773F);
    compareFloatBits(0x3d2c6760, 0.042090773582458496F);
    compareFloatBits(0x3f1c99e3, 0.6117231249809265F);
    compareFloatBits(0x3f62a5de, 0.8853434324264526F);
    compareFloatBits(0x3f3ca39f, 0.7368716597557068F);
    compareFloatBits(0x3f2890bd, 0.6584585309028625F);
    compareFloatBits(0x3d7568a0, 0.059914231300354004F);
    compareFloatBits(0x3e96620e, 0.2937168478965759F);
    compareFloatBits(0x3d358bb0, 0.044322669506073F);
    compareFloatBits(0x3e9e2728, 0.30889248847961426F);
    compareFloatBits(0x3e887622, 0.2665262818336487F);
    compareFloatBits(0x3ec71942, 0.38886457681655884F);
    compareFloatBits(0x3f3ecf0c, 0.7453467845916748F);
    compareFloatBits(0x3f1d8b64, 0.615408182144165F);
    compareFloatBits(0x3f22e45e, 0.6362971067428589F);
    compareFloatBits(0x3f1bc5c0, 0.6084861755371094F);
    compareFloatBits(0x3ef2ce7c, 0.4742316007614136F);
    compareFloatBits(0x3ee6d16a, 0.45081645250320435F);
    compareFloatBits(0x3e22dbf4, 0.15904217958450317F);
    compareFloatBits(0x3ec8462e, 0.39116042852401733F);
    compareFloatBits(0x3eed4110, 0.46338701248168945F);
    compareFloatBits(0x3e7d46f0, 0.24734091758728027F);
    compareFloatBits(0x3ee4ed1a, 0.44712144136428833F);
    compareFloatBits(0x3e171310, 0.14753365516662598F);
    compareFloatBits(0x3f07ee13, 0.5309764742851257F);
    compareFloatBits(0x3ea82356, 0.3283945918083191F);
    compareFloatBits(0x3eaad676, 0.33366745710372925F);
    compareFloatBits(0x3f0b7415, 0.5447400212287903F);
    compareFloatBits(0x3e5da494, 0.2164481282234192F);
    compareFloatBits(0x3eb24b98, 0.3482329845428467F);
    compareFloatBits(0x3dbcf808, 0.09226995706558228F);
    compareFloatBits(0x3ebff9ec, 0.37495362758636475F);
    compareFloatBits(0x3ea1c5c6, 0.315962016582489F);
    compareFloatBits(0x3e922946, 0.2854711413383484F);
    compareFloatBits(0x3eb24736, 0.3481995463371277F);
    compareFloatBits(0x3d870700, 0.06593132019042969F);
    compareFloatBits(0x3db58dc0, 0.08864927291870117F);
    compareFloatBits(0x3f2fbba4, 0.6864569187164307F);
    compareFloatBits(0x3e67b5b4, 0.22627907991409302F);
    compareFloatBits(0x3e1b35d8, 0.151572585105896F);
    compareFloatBits(0x3eb18776, 0.3467366099357605F);
    compareFloatBits(0x3e4a1108, 0.19733059406280518F);
    compareFloatBits(0x3f77debb, 0.968242347240448F);
    compareFloatBits(0x3f2f3f2c, 0.6845576763153076F);
    compareFloatBits(0x3ee68150, 0.45020532608032227F);
    compareFloatBits(0x3da1ca40, 0.07899904251098633F);
    compareFloatBits(0x3f1a6205, 0.6030581593513489F);
    compareFloatBits(0x3e596a8c, 0.2123205065727234F);
    compareFloatBits(0x3f2b9b3d, 0.6703374981880188F);
    compareFloatBits(0x3f5a41df, 0.8525676131248474F);
    compareFloatBits(0x3f2ba95b, 0.6705529093742371F);
    compareFloatBits(0x3c636740, 0.013879597187042236F);
    compareFloatBits(0x3ea13618, 0.3148658275604248F);
    compareFloatBits(0x3ef32f54, 0.4749704599380493F);
    compareFloatBits(0x3db49fd8, 0.08819550275802612F);
    compareFloatBits(0x3ed2654e, 0.4109291434288025F);
    compareFloatBits(0x3f18e527, 0.5972465872764587F);
    compareFloatBits(0x3e86438e, 0.2622341513633728F);
    compareFloatBits(0x3d94d468, 0.07267075777053833F);
    compareFloatBits(0x3dec0730, 0.11524808406829834F);
    compareFloatBits(0x3e746c68, 0.23869478702545166F);
    compareFloatBits(0x3f7176bc, 0.9432179927825928F);
    compareFloatBits(0x3eb06baa, 0.34457141160964966F);
    compareFloatBits(0x3ec7873e, 0.3897036910057068F);
    // Sampled patterns across the full exponent range, both signs, including
    // very small and very large magnitudes.
    compareFloatBits(0x3337354c, 4.2656481014091696E-8F);
    compareFloatBits(0xcef68e86, -2.068267776E9F);
    compareFloatBits(0x1aee11a3, 9.846298654970688E-23F);
    compareFloatBits(0x25855b49, 2.313367945844274E-16F);
    compareFloatBits(0x51bbb6d8, 1.0077831168E11F);
    compareFloatBits(0xd10cbbd1, -3.7777903616E10F);
    compareFloatBits(0x6b71ebcc, 2.9246464178639103E26F);
    compareFloatBits(0xa209e607, -1.868873766564279E-18F);
    compareFloatBits(0xa0fdd3be, -4.299998635695525E-19F);
    compareFloatBits(0x66b6f9c2, 4.320389591649362E23F);
    compareFloatBits(0x9a107a3f, -2.987725166002456E-23F);
    compareFloatBits(0xc7e6d303, -118182.0234375F);
    compareFloatBits(0x1504020d, 2.6658805490381716E-26F);
    compareFloatBits(0x9c0c256, 4.640507130264806E-33F);
    compareFloatBits(0x8adc428e, -2.1210264479196232E-32F);
    compareFloatBits(0xdfae9f63, -2.516576947109521E19F);
    compareFloatBits(0xceba38f7, -1.562147712E9F);
    compareFloatBits(0xa3d86a36, -2.346374900739753E-17F);
    compareFloatBits(0x508d0d6d, 1.8931738624E10F);
    compareFloatBits(0x3ea209d0, 0.3164811134338379F);
    compareFloatBits(0x7bac59a0, 1.7897857412574353E36F);
    compareFloatBits(0x468c3af4, 17949.4765625F);
    compareFloatBits(0x178d2da8, 9.123436692979724E-25F);
    compareFloatBits(0x4a068058, 2203670.0F);
    compareFloatBits(0x6ebdd138, 2.937279840252836E28F);
    compareFloatBits(0x6f886d95, 8.444487576529374E28F);
    compareFloatBits(0x4d1fc258, 1.67519616E8F);
    compareFloatBits(0xa1db0894, -1.48422878466768E-18F);
    compareFloatBits(0x90f0c84b, -9.497190880745409E-29F);
    compareFloatBits(0xfb5bc94f, -1.1411960353742999E36F);
    compareFloatBits(0xf86c6851, -1.9179653854596293E34F);
    compareFloatBits(0xbb31060d, -0.00270116631872952F);
    compareFloatBits(0x77a07357, 6.508647400938524E33F);
    compareFloatBits(0xa120ea93, -5.452056501780286E-19F);
    compareFloatBits(0x67100ede, 6.802950247361373E23F);
    compareFloatBits(0xa6e082db, -1.5578590790627281E-15F);
    compareFloatBits(0x27b7589e, 5.088878232380797E-15F);
    compareFloatBits(0xac6764c, 1.9111204788084013E-32F);
    compareFloatBits(0x9c99e3b0, -1.0183546536936767E-21F);
    compareFloatBits(0xe2a5de8f, -1.5298749044800828E21F);
    compareFloatBits(0xbca39f3d, -0.0199733916670084F);
    compareFloatBits(0xa890bd69, -1.6069355115761082E-14F);
    compareFloatBits(0xf568a12, 1.0577605982258498E-29F);
    compareFloatBits(0x4b310752, 1.1601746E7F);
    compareFloatBits(0xb58bb7e, 4.1741140243391215E-32F);
    compareFloatBits(0x4f139499, 2.475989248E9F);
    compareFloatBits(0x443b1161, 748.2715454101562F);
    compareFloatBits(0x638ca14d, 5.188334233065301E21F);
    compareFloatBits(0xbecf0c69, -0.4043915569782257F);
    compareFloatBits(0x9d8b6455, -3.689673453519375E-21F);
    compareFloatBits(0xa2e45ebb, -6.189982361684887E-18F);
    compareFloatBits(0x9bc5c04c, -3.2715185077444394E-22F);
    compareFloatBits(0x79673ea0, 7.504317251393587E34F);
    compareFloatBits(0x7368b523, 1.8436992802490676E31F);
    compareFloatBits(0x28b6fdf6, 2.031619704824343E-14F);
    compareFloatBits(0x6423179d, 1.2034083200995491E22F);
    compareFloatBits(0x76a0886c, 1.627996995533634E33F);
    compareFloatBits(0x3f51bc17, 0.8192762732505798F);
    compareFloatBits(0x72768dd4, 4.883505414291899E30F);
    compareFloatBits(0x25c4c43e, 3.4133559007908424E-16F);
    compareFloatBits(0x87ee1312, -3.582146842575625E-34F);
    compareFloatBits(0x5411ab9d, 2.502597804032E12F);
    compareFloatBits(0x556b3b74, 1.616503635968E13F);
    compareFloatBits(0x8b741536, -4.700864797886097E-32F);
    compareFloatBits(0x3769256b, 1.3896594282414299E-5F);
    compareFloatBits(0x5925cc76, 2.916761145966592E15F);
    compareFloatBits(0x179f010a, 1.0275396467808127E-24F);
    compareFloatBits(0x5ffcf643, 3.6455660418215444E19F);
    compareFloatBits(0x50e2e3b3, 3.0452586496E10F);
    compareFloatBits(0x4914a300, 608816.0F);
    compareFloatBits(0x59239bd2, 2.878234215579648E15F);
    compareFloatBits(0x10e0e0b4, 8.869863136638123E-29F);
    compareFloatBits(0x16b1b8a0, 2.8712407025600733E-25F);
    compareFloatBits(0xafbba4b9, -3.413214433312106E-10F);
    compareFloatBits(0x39ed6d41, 4.528556310106069E-4F);
    compareFloatBits(0x26cd7698, 1.4256877403358119E-15F);
    compareFloatBits(0x58c3bbb6, 1.721687838031872E15F);
    compareFloatBits(0x32844205, 1.53968446880981E-8F);
    compareFloatBits(0xf7debb7d, -9.035098568054132E33F);
    compareFloatBits(0xaf3f2c18, -1.738701405074039E-10F);
    compareFloatBits(0x7340a8d9, 1.5264063021291595E31F);
    compareFloatBits(0x1439482f, 9.354348821593712E-27F);
    compareFloatBits(0x9a62050f, -4.673979090873511E-23F);
    compareFloatBits(0x365aa321, 3.257948492318974E-6F);
    compareFloatBits(0xab9b3d23, -1.1030381252483124E-12F);
    compareFloatBits(0xda41dfad, -1.3642651156873216E16F);
    compareFloatBits(0xaba95bd7, -1.2033662911623E-12F);
    compareFloatBits(0x38d9d45, 8.323342486879323E-37F);
    compareFloatBits(0x509b0cf8, 2.08105472E10F);
    compareFloatBits(0x7997aaa7, 9.843725829681495E34F);
    compareFloatBits(0x1693fb40, 2.3907691910171116E-25F);
    compareFloatBits(0x6932a7e3, 1.3498851156559247E25F);
    compareFloatBits(0x98e52756, -5.923483173240392E-24F);
    compareFloatBits(0x4321c7ba, 161.78018188476562F);
    compareFloatBits(0x129a8db3, 9.753697906689189E-28F);
    compareFloatBits(0x1d80e684, 3.411966546003519E-21F);
    compareFloatBits(0x3d1b1a71, 0.03786701336503029F);
    compareFloatBits(0xf176bcf8, -1.221788185872419E30F);
    compareFloatBits(0x5835d550, 7.99711099355136E14F);
    compareFloatBits(0x63c39f88, 7.217221064844452E21F);
}
207233.667235hadoop
/**
 * Exercises component restart-policy handling in
 * {@code ComponentInstance.handleComponentInstanceRelaunch} across the three
 * policies (ALWAYS, NEVER, ON_FAILURE), for both container success (exit 0)
 * and failure (non-zero exit), and for single- and multi-instance components,
 * verifying when instances are re-inserted for relaunch, marked
 * succeeded/failed, and when the service termination handler fires.
 *
 * Fix over the previous revision: the deprecated-for-removal boxing
 * constructor {@code new Long(...)} is replaced by {@code Long.valueOf(...)},
 * and the misspelled local {@code commponentInstance} is renamed. All mocked
 * interactions and verifications are unchanged.
 */
public void testComponentRestartPolicy() {
    Map<String, Component> allComponents = new HashMap<>();
    Service mockService = mock(Service.class);
    ServiceContext serviceContext = mock(ServiceContext.class);
    when(serviceContext.getService()).thenReturn(mockService);
    ServiceScheduler serviceSchedulerInstance = new ServiceScheduler(serviceContext);
    ServiceScheduler serviceScheduler = spy(serviceSchedulerInstance);
    when(serviceScheduler.getAllComponents()).thenReturn(allComponents);
    Mockito.doNothing().when(serviceScheduler).setGracefulStop(any(FinalApplicationStatus.class));
    final String containerDiag = "Container succeeded";
    ComponentInstanceEvent componentInstanceEvent = mock(ComponentInstanceEvent.class);
    ContainerId containerId = ContainerId.newContainerId(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234L, 1), 1), 1);
    ContainerStatus containerStatus = ContainerStatus.newInstance(containerId, org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE, containerDiag, 0);
    when(componentInstanceEvent.getStatus()).thenReturn(containerStatus);
    // ALWAYS policy, container succeeded: instance is relaunched, never
    // terminal, and the service is not terminated.
    Component comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.ALWAYS, 1, 0, 1, 0);
    ComponentInstance componentInstance = comp.getAllComponentInstances().iterator().next();
    ComponentInstance.handleComponentInstanceRelaunch(componentInstance, componentInstanceEvent, false, containerDiag);
    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(1)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), never()).terminate(anyInt());
    // ALWAYS policy, container failed: still relaunched, still no termination.
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.ALWAYS, 0, 1, 1, 0);
    componentInstance = comp.getAllComponentInstances().iterator().next();
    containerStatus.setExitStatus(1);
    ComponentInstance.handleComponentInstanceRelaunch(componentInstance, componentInstanceEvent, false, containerDiag);
    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(1)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), never()).terminate(anyInt());
    // NEVER policy, single instance succeeded: marked succeeded, no relaunch,
    // service terminates with exit code 0.
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.NEVER, 1, 0, 1, 0);
    componentInstance = comp.getAllComponentInstances().iterator().next();
    containerStatus.setExitStatus(0);
    Map<String, ComponentInstance> succeededInstances = new HashMap<>();
    succeededInstances.put(componentInstance.getCompInstanceName(), componentInstance);
    when(comp.getSucceededInstances()).thenReturn(succeededInstances.values());
    when(comp.getNumSucceededInstances()).thenReturn(Long.valueOf(1));
    ComponentInstance.handleComponentInstanceRelaunch(componentInstance, componentInstanceEvent, false, containerDiag);
    verify(comp, times(1)).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(0)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(eq(0));
    // NEVER policy, single instance failed: marked failed, service terminates
    // with the failure exit code (-1).
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.NEVER, 0, 1, 1, 0);
    componentInstance = comp.getAllComponentInstances().iterator().next();
    containerStatus.setExitStatus(-1);
    when(comp.getNumFailedInstances()).thenReturn(Long.valueOf(1));
    ComponentInstance.handleComponentInstanceRelaunch(componentInstance, componentInstanceEvent, false, containerDiag);
    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, times(1)).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(0)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(eq(-1));
    // ON_FAILURE policy, container failed: relaunched, no termination.
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.ON_FAILURE, 0, 1, 1, 0);
    componentInstance = comp.getAllComponentInstances().iterator().next();
    containerStatus.setExitStatus(1);
    ComponentInstance.handleComponentInstanceRelaunch(componentInstance, componentInstanceEvent, false, containerDiag);
    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(1)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), times(0)).terminate(anyInt());
    // NEVER policy, one of three instances failed: marked failed, but the
    // service keeps running because other instances remain.
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.NEVER, 0, 1, 3, 0);
    componentInstance = comp.getAllComponentInstances().iterator().next();
    containerStatus.setExitStatus(1);
    ComponentInstance.handleComponentInstanceRelaunch(componentInstance, componentInstanceEvent, false, containerDiag);
    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, times(1)).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(0)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), times(0)).terminate(anyInt());
    // ON_FAILURE policy, one of three instances failed: that instance is
    // relaunched and the service keeps running.
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.ON_FAILURE, 0, 1, 3, 0);
    Iterator<ComponentInstance> iter = comp.getAllComponentInstances().iterator();
    containerStatus.setExitStatus(1);
    ComponentInstance failedInstance = iter.next();
    ComponentInstance.handleComponentInstanceRelaunch(failedInstance, componentInstanceEvent, false, containerDiag);
    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(1)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), times(0)).terminate(anyInt());
    // Two NEVER components, all instances of both fail: every instance is
    // marked failed and the service terminates once with -1.
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.NEVER, 2, 0);
    Collection<ComponentInstance> component1Instances = comp.getAllComponentInstances();
    containerStatus.setExitStatus(-1);
    Component comp2 = createComponent(componentInstance.getComponent().getScheduler(), org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.NEVER, 2, 1);
    Collection<ComponentInstance> component2Instances = comp2.getAllComponentInstances();
    Map<String, ComponentInstance> failed2Instances = new HashMap<>();
    for (ComponentInstance component2Instance : component2Instances) {
        failed2Instances.put(component2Instance.getCompInstanceName(), component2Instance);
        when(component2Instance.getComponent().getFailedInstances()).thenReturn(failed2Instances.values());
        when(component2Instance.getComponent().getNumFailedInstances()).thenReturn(Long.valueOf(failed2Instances.size()));
        ComponentInstance.handleComponentInstanceRelaunch(component2Instance, componentInstanceEvent, false, containerDiag);
    }
    Map<String, ComponentInstance> failed1Instances = new HashMap<>();
    for (ComponentInstance component1Instance : component1Instances) {
        failed1Instances.put(component1Instance.getCompInstanceName(), component1Instance);
        when(component1Instance.getComponent().getFailedInstances()).thenReturn(failed1Instances.values());
        when(component1Instance.getComponent().getNumFailedInstances()).thenReturn(Long.valueOf(failed1Instances.size()));
        ComponentInstance.handleComponentInstanceRelaunch(component1Instance, componentInstanceEvent, false, containerDiag);
    }
    verify(comp, never()).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, times(2)).markAsFailed(any(ComponentInstance.class));
    verify(comp, times(0)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(eq(-1));
    // Two ON_FAILURE components, all instances of both succeed: every instance
    // is marked succeeded and the service terminates once with 0.
    containerStatus.setExitStatus(0);
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.ON_FAILURE, 2, 0);
    component1Instances = comp.getAllComponentInstances();
    comp2 = createComponent(componentInstance.getComponent().getScheduler(), org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.ON_FAILURE, 2, 1);
    component2Instances = comp2.getAllComponentInstances();
    Map<String, ComponentInstance> succeeded2Instances = new HashMap<>();
    for (ComponentInstance component2Instance : component2Instances) {
        succeeded2Instances.put(component2Instance.getCompInstanceName(), component2Instance);
        when(component2Instance.getComponent().getSucceededInstances()).thenReturn(succeeded2Instances.values());
        when(component2Instance.getComponent().getNumSucceededInstances()).thenReturn(Long.valueOf(succeeded2Instances.size()));
        ComponentInstance.handleComponentInstanceRelaunch(component2Instance, componentInstanceEvent, false, containerDiag);
    }
    Map<String, ComponentInstance> succeeded1Instances = new HashMap<>();
    for (ComponentInstance component1Instance : component1Instances) {
        succeeded1Instances.put(component1Instance.getCompInstanceName(), component1Instance);
        when(component1Instance.getComponent().getSucceededInstances()).thenReturn(succeeded1Instances.values());
        when(component1Instance.getComponent().getNumSucceededInstances()).thenReturn(Long.valueOf(succeeded1Instances.size()));
        ComponentInstance.handleComponentInstanceRelaunch(component1Instance, componentInstanceEvent, false, containerDiag);
    }
    verify(comp, times(2)).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
    verify(componentInstance.getComponent(), times(0)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), times(1)).terminate(eq(0));
    // Two NEVER components where only one component's success counters are
    // stubbed: succeeded instances are recorded but the service must NOT
    // terminate, because the other component has not completed.
    comp = createComponent(serviceScheduler, org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.NEVER, 2, 0);
    component1Instances = comp.getAllComponentInstances();
    comp2 = createComponent(componentInstance.getComponent().getScheduler(), org.apache.hadoop.yarn.service.api.records.Component.RestartPolicyEnum.NEVER, 2, 1);
    component2Instances = comp2.getAllComponentInstances();
    for (ComponentInstance component2Instance : component2Instances) {
        ComponentInstance.handleComponentInstanceRelaunch(component2Instance, componentInstanceEvent, false, containerDiag);
    }
    succeeded1Instances = new HashMap<>();
    for (ComponentInstance component1Instance : component1Instances) {
        succeeded1Instances.put(component1Instance.getCompInstanceName(), component1Instance);
        when(component1Instance.getComponent().getSucceededInstances()).thenReturn(succeeded1Instances.values());
        ComponentInstance.handleComponentInstanceRelaunch(component1Instance, componentInstanceEvent, false, containerDiag);
    }
    verify(comp, times(2)).markAsSucceeded(any(ComponentInstance.class));
    verify(comp, never()).markAsFailed(any(ComponentInstance.class));
    verify(componentInstance.getComponent(), times(0)).reInsertPendingInstance(any(ComponentInstance.class));
    verify(serviceScheduler.getTerminationHandler(), never()).terminate(eq(0));
}
204552.3534185hadoop
/**
 * Handles a NodeManager registration request.
 *
 * Performs, in order: NM-version gate, optional hostname-resolution check,
 * include/exclude-list check, dynamic-resource override, minimum-allocation
 * check, token key distribution, node bookkeeping (fresh start vs. reconnect),
 * liveliness registration, optional container-status replay, node-label and
 * node-attribute updates, and finally a NORMAL response.
 *
 * @param request registration payload from the NodeManager (node id, ports,
 *                capability, version, running apps, container statuses, ...)
 * @return a response whose action is SHUTDOWN (with diagnostics) when any
 *         admission check fails, otherwise NORMAL with token master keys,
 *         RM identifier and RM version populated
 * @throws YarnException, IOException declared for the protocol; this body
 *         itself only propagates them from callees
 */
public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request) throws YarnException, IOException {
    NodeId nodeId = request.getNodeId();
    String host = nodeId.getHost();
    int cmPort = nodeId.getPort();
    int httpPort = request.getHttpPort();
    Resource capability = request.getResource();
    String nodeManagerVersion = request.getNMVersion();
    Resource physicalResource = request.getPhysicalResource();
    NodeStatus nodeStatus = request.getNodeStatus();
    RegisterNodeManagerResponse response = recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
    // Admission check 1: reject NMs older than the configured minimum version.
    // "NONE" disables the check; "EqualToRM" pins the minimum to the RM's own
    // version (note: this permanently overwrites the configured field on first use).
    if (!minimumNodeManagerVersion.equals("NONE")) {
        if (minimumNodeManagerVersion.equals("EqualToRM")) {
            minimumNodeManagerVersion = YarnVersionInfo.getVersion();
        }
        if ((nodeManagerVersion == null) || (VersionUtil.compareVersions(nodeManagerVersion, minimumNodeManagerVersion)) < 0) {
            String message = "Disallowed NodeManager Version " + nodeManagerVersion + ", is less than the minimum version " + minimumNodeManagerVersion + " sending SHUTDOWN signal to " + "NodeManager.";
            LOG.info(message);
            response.setDiagnosticsMessage(message);
            response.setNodeAction(NodeAction.SHUTDOWN);
            return response;
        }
    }
    // Admission check 2 (optional): the NM's advertised hostname must resolve;
    // an unresolved address with a known remote IP indicates a DNS mismatch.
    if (checkIpHostnameInRegistration) {
        InetSocketAddress nmAddress = NetUtils.createSocketAddrForHost(host, cmPort);
        InetAddress inetAddress = Server.getRemoteIp();
        if (inetAddress != null && nmAddress.isUnresolved()) {
            final String message = "hostname cannot be resolved (ip=" + inetAddress.getHostAddress() + ", hostname=" + host + ")";
            LOG.warn("Unresolved nodemanager registration: " + message);
            response.setDiagnosticsMessage(message);
            response.setNodeAction(NodeAction.SHUTDOWN);
            return response;
        }
    }
    // Admission check 3: host must be on the include list (or currently
    // decommissioning, which is allowed to re-register).
    if (!this.nodesListManager.isValidNode(host) && !isNodeInDecommissioning(nodeId)) {
        String message = "Disallowed NodeManager from  " + host + ", Sending SHUTDOWN signal to the NodeManager.";
        LOG.info(message);
        response.setDiagnosticsMessage(message);
        response.setNodeAction(NodeAction.SHUTDOWN);
        return response;
    }
    // Dynamic-resources override: dynamic-resources.xml settings take
    // precedence over the capability the NM reported.
    String nid = nodeId.toString();
    Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
    if (dynamicLoadCapability != null) {
        LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to" + " settings in dynamic-resources.xml.", nid, capability, dynamicLoadCapability);
        capability = dynamicLoadCapability;
        response.setResource(capability);
    }
    // Admission check 4: node must be able to host at least one minimum
    // allocation (memory and vcores).
    if (capability.getMemorySize() < minAllocMb || capability.getVirtualCores() < minAllocVcores) {
        String message = "NodeManager from  " + host + " doesn't satisfy minimum allocations, Sending SHUTDOWN" + " signal to the NodeManager. Node capabilities are " + capability + "; minimums are " + minAllocMb + "mb and " + minAllocVcores + " vcores";
        LOG.info(message);
        response.setDiagnosticsMessage(message);
        response.setNodeAction(NodeAction.SHUTDOWN);
        return response;
    }
    // Node admitted: hand out current container-token and NM-token master keys.
    response.setContainerTokenMasterKey(containerTokenSecretManager.getCurrentKey());
    response.setNMTokenMasterKey(nmTokenSecretManager.getCurrentKey());
    RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, resolve(host), capability, nodeManagerVersion, physicalResource);
    // putIfAbsent distinguishes a brand-new node (oldNode == null) from a
    // reconnect of a node the RM already tracks.
    RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
    if (oldNode == null) {
        // Fresh registration: fire a started event, forwarding any log
        // aggregation reports the NM cached while the RM was unreachable.
        RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId, request.getNMContainerStatuses(), request.getRunningApplications(), nodeStatus);
        if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Found the number of previous cached log aggregation " + "status from nodemanager:" + nodeId + " is :" + request.getLogAggregationReportsForApps().size());
            }
            startEvent.setLogAggregationReportsForApps(request.getLogAggregationReportsForApps());
        }
        this.rmContext.getDispatcher().getEventHandler().handle(startEvent);
    } else {
        // Reconnect: stop liveliness tracking while we decide whether to
        // replace the old node record or merge into it.
        LOG.info("Reconnect from the node at: " + host);
        this.nmLivelinessMonitor.unregister(nodeId);
        // Replace the node outright only when it reports no running apps, is
        // not decommissioning, and came back on a different HTTP port.
        if (CollectionUtils.isEmpty(request.getRunningApplications()) && rmNode.getState() != NodeState.DECOMMISSIONING && rmNode.getHttpPort() != oldNode.getHttpPort()) {
            // NOTE(review): this switches on the state of the freshly
            // constructed rmNode (never inserted, since putIfAbsent returned
            // oldNode) rather than oldNode's state; confirm the intended
            // metrics decrement targets the departing node.
            switch(rmNode.getState()) {
                case RUNNING:
                    ClusterMetrics.getMetrics().decrNumActiveNodes();
                    break;
                case UNHEALTHY:
                    ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
                    break;
                default:
                    LOG.debug("Unexpected Rmnode state");
            }
            // Remove the old node from the scheduler, overwrite the registry
            // entry, and restart the node lifecycle from scratch.
            this.rmContext.getDispatcher().getEventHandler().handle(new NodeRemovedSchedulerEvent(rmNode));
            this.rmContext.getRMNodes().put(nodeId, rmNode);
            this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus));
        } else {
            // Otherwise merge: keep the existing RMNode and let the reconnect
            // event reconcile running apps and container statuses.
            oldNode.resetLastNodeHeartBeatResponse();
            this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeReconnectEvent(nodeId, rmNode, request.getRunningApplications(), request.getNMContainerStatuses()));
        }
    }
    // Old NM tokens are invalid after (re)registration; start liveliness
    // monitoring for the node.
    this.nmTokenSecretManager.removeNodeKey(nodeId);
    this.nmLivelinessMonitor.register(nodeId);
    // Without work-preserving recovery, replay reported container statuses so
    // applications learn about containers that outlived the RM restart.
    if (!rmContext.isWorkPreservingRecoveryEnabled()) {
        if (!request.getNMContainerStatuses().isEmpty()) {
            LOG.info("received container statuses on node manager register :" + request.getNMContainerStatuses());
            for (NMContainerStatus status : request.getNMContainerStatuses()) {
                handleNMContainerStatus(status, nodeId);
            }
        }
    }
    // Node labels: accept NM-reported labels only under distributed label
    // configuration; under delegated-centralized config, ask the updater instead.
    Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet(request.getNodeLabels());
    if (isDistributedNodeLabelsConf && nodeLabels != null) {
        try {
            updateNodeLabelsFromNMReport(nodeLabels, nodeId);
            response.setAreNodeLabelsAcceptedByRM(true);
        } catch (IOException ex) {
            response.setDiagnosticsMessage(ex.getMessage());
            response.setAreNodeLabelsAcceptedByRM(false);
        }
    } else if (isDelegatedCentralizedNodeLabelsConf) {
        this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId);
    }
    // Node attributes: apply if reported, appending any failure to existing
    // diagnostics rather than overwriting them.
    if (request.getNodeAttributes() != null) {
        try {
            updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
            response.setAreNodeAttributesAcceptedByRM(true);
        } catch (IOException ex) {
            String errorMsg = response.getDiagnosticsMessage() == null ? ex.getMessage() : response.getDiagnosticsMessage() + "\n" + ex.getMessage();
            response.setDiagnosticsMessage(errorMsg);
            response.setAreNodeAttributesAcceptedByRM(false);
        }
    }
    // Build a single summary log line for the successful registration.
    StringBuilder message = new StringBuilder();
    message.append("NodeManager from node ").append(host).append("(cmPort: ").append(cmPort).append(" httpPort: ");
    message.append(httpPort).append(") ").append("registered with capability: ").append(capability);
    message.append(", assigned nodeId ").append(nodeId);
    if (response.getAreNodeLabelsAcceptedByRM()) {
        message.append(", node labels { ").append(StringUtils.join(",", nodeLabels) + " } ");
    }
    if (response.getAreNodeAttributesAcceptedByRM()) {
        message.append(", node attributes { ").append(request.getNodeAttributes() + " } ");
    }
    LOG.info(message.toString());
    response.setNodeAction(NodeAction.NORMAL);
    response.setRMIdentifier(ResourceManager.getClusterTimeStamp());
    response.setRMVersion(YarnVersionInfo.getVersion());
    return response;
}
203373.3539188wildfly
/**
 * Parses a single {@code <datasource>} element from a version 1.2 datasources
 * schema document and appends the resulting management operations to {@code list}.
 * <p>
 * Builds one ADD operation for the datasource itself, one ADD operation per
 * nested {@code connection-property}, and — if the datasource is enabled — a
 * trailing ENABLE operation carrying the persistence flag.
 *
 * @param reader        the stream reader positioned on the {@code datasource} start element
 * @param list          receives the generated operations in execution order
 * @param parentAddress the address of the parent datasources subsystem resource
 * @throws XMLStreamException on underlying XML read errors
 * @throws ParserException    on unexpected elements/attributes or premature end of document
 * @throws ValidateException  on attribute/element value validation failures
 */
private void parseDataSource_1_2(final XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // Default from the schema; overridden only when the "enabled" attribute is present.
    boolean enabled = Defaults.ENABLED.booleanValue();
    boolean persistEnabled = true;
    final int count = reader.getAttributeCount();
    // First pass: consume the attributes of the <datasource> start element.
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final DataSource.Attribute attribute = DataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        // Unlike the other attributes, "enabled" is tracked locally: it drives
                        // a separate ENABLE operation emitted after the ADD (see END_ELEMENT).
                        enabled = Boolean.parseBoolean(value);
                        persistEnabled = true;
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // Held locally: the pool name becomes the last address element, not a parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JTA:
                {
                    final String value = rawAttributeText(reader, JTA.getXmlName());
                    if (value != null) {
                        JTA.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case CONNECTABLE:
                {
                    final String value = rawAttributeText(reader, CONNECTABLE.getXmlName());
                    if (value != null) {
                        CONNECTABLE.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // "statistics-enabled" is matched by name here — presumably because it is not part
                // of the DataSource.Attribute enum for this schema version; confirm against the enum.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Datasource address = parent address + ("data-source" => poolName).
    // NOTE(review): poolName may still be null here if the pool-name attribute was absent.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(DATA_SOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // Connection-property ADD operations are buffered so they can be appended
    // to the list after the datasource ADD itself.
    List<ModelNode> configPropertiesOperations = new ArrayList<ModelNode>(0);
    // Second pass: walk child elements until the closing </datasource> tag.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.DATASOURCE) {
                        // End of this datasource: flush the buffered operations, then
                        // optionally emit the ENABLE operation and return.
                        list.add(operation);
                        list.addAll(configPropertiesOperations);
                        if (enabled) {
                            final ModelNode enableOperation = new ModelNode();
                            enableOperation.get(OP).set(ENABLE);
                            enableOperation.get(OP_ADDR).set(dsAddress);
                            enableOperation.get(PERSISTENT).set(persistEnabled);
                            list.add(enableOperation);
                        }
                        return;
                    } else {
                        // Closing tags of known child elements are ignored; unknown ones are errors.
                        if (DataSource.Tag.forName(reader.getLocalName()) == DataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(DataSource.Tag.forName(reader.getLocalName())) {
                        case CONNECTION_PROPERTY:
                            {
                                // Each connection property becomes its own child ADD operation
                                // addressed under this datasource.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(CONNECTION_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                CONNECTION_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                configPropertiesOperations.add(configOperation);
                                break;
                            }
                        case CONNECTION_URL:
                            {
                                String value = rawElementText(reader);
                                CONNECTION_URL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER_CLASS:
                            {
                                String value = rawElementText(reader);
                                DRIVER_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case POOL:
                            {
                                // Compound child elements delegate to dedicated sub-parsers that
                                // write their parameters directly into `operation`.
                                parsePool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                parseDsSecurity(reader, operation);
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Only reachable if the document ends before </datasource> is seen.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
203154.5340185wildfly
/**
 * Parses a single {@code <datasource>} element from a version 3.0 datasources
 * schema document and appends the resulting management operations to {@code list}.
 * <p>
 * Unlike the 1.x parser, the "enabled" attribute is stored directly as a parameter
 * of the datasource ADD operation, so no separate ENABLE operation is generated.
 *
 * @param reader        the stream reader positioned on the {@code datasource} start element
 * @param list          receives the generated operations in execution order
 * @param parentAddress the address of the parent datasources subsystem resource
 * @throws XMLStreamException on underlying XML read errors
 * @throws ParserException    on unexpected elements/attributes or premature end of document
 * @throws ValidateException  on attribute/element value validation failures
 */
private void parseDataSource_3_0(final XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    final int count = reader.getAttributeCount();
    // First pass: consume the attributes of the <datasource> start element.
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final DataSource.Attribute attribute = DataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    // In the 3.0 schema "enabled" is an ordinary operation parameter.
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // Held locally: the pool name becomes the last address element, not a parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JTA:
                {
                    final String value = rawAttributeText(reader, JTA.getXmlName());
                    if (value != null) {
                        JTA.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case CONNECTABLE:
                {
                    final String value = rawAttributeText(reader, CONNECTABLE.getXmlName());
                    if (value != null) {
                        CONNECTABLE.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case TRACKING:
                {
                    // New in the 3.0 schema relative to 1.2.
                    final String value = rawAttributeText(reader, TRACKING.getXmlName());
                    if (value != null) {
                        TRACKING.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // "statistics-enabled" is matched by name here — presumably because it is not part
                // of the DataSource.Attribute enum for this schema version; confirm against the enum.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Datasource address = parent address + ("data-source" => poolName).
    // NOTE(review): poolName may still be null here if the pool-name attribute was absent.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(DATA_SOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // Connection-property ADD operations are buffered so they can be appended
    // to the list after the datasource ADD itself.
    List<ModelNode> configPropertiesOperations = new ArrayList<ModelNode>(0);
    // Second pass: walk child elements until the closing </datasource> tag.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.DATASOURCE) {
                        // End of this datasource: flush buffered operations and return.
                        list.add(operation);
                        list.addAll(configPropertiesOperations);
                        return;
                    } else {
                        // Closing tags of known child elements are ignored; unknown ones are errors.
                        if (DataSource.Tag.forName(reader.getLocalName()) == DataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(DataSource.Tag.forName(reader.getLocalName())) {
                        case CONNECTION_PROPERTY:
                            {
                                // Each connection property becomes its own child ADD operation
                                // addressed under this datasource.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(CONNECTION_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                CONNECTION_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                configPropertiesOperations.add(configOperation);
                                break;
                            }
                        case CONNECTION_URL:
                            {
                                String value = rawElementText(reader);
                                CONNECTION_URL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER_CLASS:
                            {
                                String value = rawElementText(reader);
                                DRIVER_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case POOL:
                            {
                                // Compound child elements delegate to dedicated sub-parsers that
                                // write their parameters directly into `operation`.
                                parsePool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                parseDsSecurity(reader, operation);
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Only reachable if the document ends before </datasource> is seen.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
204106.4739184wildfly
/**
 * Parses one {@code <resource-adapter>} element and appends the resulting
 * management operations to {@code list}: the resource-adapter ADD itself,
 * followed by ADDs for its config properties, connection definitions (with
 * their nested config properties), and admin objects (with theirs).
 * <p>
 * Child operations are accumulated in maps keyed by name and only assembled
 * into addressed operations once the closing tag is reached, because the
 * resource-adapter address is not known until then (it depends on the "id"
 * attribute or, failing that, the archive/module name).
 *
 * @param reader        the stream reader positioned on the {@code resource-adapter} start element
 * @param list          receives the generated operations in execution order
 * @param parentAddress the address of the parent resource-adapters subsystem resource
 * @throws XMLStreamException on underlying XML read errors
 * @throws ParserException    on unexpected/duplicate elements or premature end of document
 * @throws ValidateException  on attribute/element value validation failures
 */
private void parseResourceAdapter(final XMLExtendedStreamReader reader, final List<ModelNode> list, ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    final ModelNode raAddress = parentAddress.clone();
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    String archiveOrModuleName = null;
    // Buffers for child operations, keyed by child name; the nested maps hold the
    // config-property operations of each connection definition / admin object.
    HashMap<String, ModelNode> configPropertiesOperations = new HashMap<>();
    HashMap<String, ModelNode> connectionDefinitionsOperations = new HashMap<>();
    HashMap<String, HashMap<String, ModelNode>> cfConfigPropertiesOperations = new HashMap<>();
    HashMap<String, ModelNode> adminObjectsOperations = new HashMap<>();
    HashMap<String, HashMap<String, ModelNode>> aoConfigPropertiesOperations = new HashMap<>();
    // Exactly one of <archive>/<module> is required; <transaction-support> at most once.
    boolean archiveOrModuleMatched = false;
    boolean txSupportMatched = false;
    boolean isXa = false;
    String id = null;
    int attributeSize = reader.getAttributeCount();
    for (int i = 0; i < attributeSize; i++) {
        Attribute attribute = Attribute.forName(reader.getAttributeLocalName(i));
        String value = reader.getAttributeValue(i);
        switch(attribute) {
            case ID:
                {
                    // "id" overrides the archive/module name as the resource name in the address.
                    id = value;
                    break;
                }
            case STATISTICS_ENABLED:
                STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                break;
            default:
                // Unrecognized attributes are silently ignored here (unlike the datasource
                // parsers, which throw) — NOTE(review): confirm this leniency is intentional.
                break;
        }
    }
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (Activations.Tag.forName(reader.getLocalName()) == Activations.Tag.RESOURCE_ADAPTER) {
                        if (!archiveOrModuleMatched) {
                            throw new ParserException(bundle.requiredElementMissing(ARCHIVE.getName(), RESOURCEADAPTER_NAME));
                        }
                        // Address is finalized now that id/archive/module are known.
                        if (id != null) {
                            raAddress.add(RESOURCEADAPTER_NAME, id);
                        } else {
                            raAddress.add(RESOURCEADAPTER_NAME, archiveOrModuleName);
                        }
                        raAddress.protect();
                        operation.get(OP_ADDR).set(raAddress);
                        list.add(operation);
                        // Flush buffered config-property operations, each addressed under the RA.
                        for (Map.Entry<String, ModelNode> entry : configPropertiesOperations.entrySet()) {
                            final ModelNode env = raAddress.clone();
                            env.add(CONFIG_PROPERTIES.getName(), entry.getKey());
                            env.protect();
                            entry.getValue().get(OP_ADDR).set(env);
                            list.add(entry.getValue());
                        }
                        // Flush connection definitions, each followed by its own config properties.
                        for (Map.Entry<String, ModelNode> entry : connectionDefinitionsOperations.entrySet()) {
                            final ModelNode env = raAddress.clone();
                            env.add(CONNECTIONDEFINITIONS_NAME, entry.getKey());
                            env.protect();
                            entry.getValue().get(OP_ADDR).set(env);
                            list.add(entry.getValue());
                            final HashMap<String, ModelNode> properties = cfConfigPropertiesOperations.get(entry.getKey());
                            if (properties != null) {
                                for (Map.Entry<String, ModelNode> configEntry : properties.entrySet()) {
                                    final ModelNode configEnv = env.clone();
                                    configEnv.add(CONFIG_PROPERTIES.getName(), configEntry.getKey());
                                    configEnv.protect();
                                    configEntry.getValue().get(OP_ADDR).set(configEnv);
                                    list.add(configEntry.getValue());
                                }
                            }
                        }
                        // Flush admin objects, each followed by its own config properties.
                        for (Map.Entry<String, ModelNode> entry : adminObjectsOperations.entrySet()) {
                            final ModelNode env = raAddress.clone();
                            env.add(ADMIN_OBJECTS_NAME, entry.getKey());
                            env.protect();
                            entry.getValue().get(OP_ADDR).set(env);
                            list.add(entry.getValue());
                            final HashMap<String, ModelNode> aoProperties = aoConfigPropertiesOperations.get(entry.getKey());
                            if (aoProperties != null) {
                                for (Map.Entry<String, ModelNode> configEntry : aoProperties.entrySet()) {
                                    final ModelNode configEnv = env.clone();
                                    configEnv.add(CONFIG_PROPERTIES.getName(), configEntry.getKey());
                                    configEnv.protect();
                                    configEntry.getValue().get(OP_ADDR).set(configEnv);
                                    list.add(configEntry.getValue());
                                }
                            }
                        }
                        return;
                    } else {
                        if (AS7ResourceAdapterTags.forName(reader.getLocalName()) == AS7ResourceAdapterTags.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(AS7ResourceAdapterTags.forName(reader.getLocalName())) {
                        case ADMIN_OBJECTS:
                        case CONNECTION_DEFINITIONS:
                        case BEAN_VALIDATION_GROUPS:
                            {
                                // Pure wrapper elements; their children are handled individually.
                                break;
                            }
                        case ADMIN_OBJECT:
                            {
                                parseAdminObjects(reader, adminObjectsOperations, aoConfigPropertiesOperations);
                                break;
                            }
                        case CONNECTION_DEFINITION:
                            {
                                // Dispatch on the document's namespace: each schema generation
                                // has its own connection-definition parser.
                                switch(Namespace.forUri(reader.getNamespaceURI())) {
                                    case RESOURCEADAPTERS_1_0:
                                    case RESOURCEADAPTERS_1_1:
                                    case RESOURCEADAPTERS_2_0:
                                        parseConnectionDefinitions_1_0(reader, connectionDefinitionsOperations, cfConfigPropertiesOperations, isXa);
                                        break;
                                    case RESOURCEADAPTERS_3_0:
                                        parseConnectionDefinitions_3_0(reader, connectionDefinitionsOperations, cfConfigPropertiesOperations, isXa);
                                        break;
                                    case RESOURCEADAPTERS_4_0:
                                        parseConnectionDefinitions_4_0(reader, connectionDefinitionsOperations, cfConfigPropertiesOperations, isXa);
                                        break;
                                    default:
                                        parseConnectionDefinitions_5_0(reader, connectionDefinitionsOperations, cfConfigPropertiesOperations, isXa);
                                        break;
                                }
                                break;
                            }
                        case BEAN_VALIDATION_GROUP:
                            {
                                String value = rawElementText(reader);
                                operation.get(BEANVALIDATION_GROUPS.getName()).add(parse(BEANVALIDATIONGROUP, value, reader));
                                break;
                            }
                        case BOOTSTRAP_CONTEXT:
                            {
                                String value = rawElementText(reader);
                                BOOTSTRAP_CONTEXT.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case CONFIG_PROPERTY:
                            {
                                parseConfigProperties(reader, configPropertiesOperations);
                                break;
                            }
                        case TRANSACTION_SUPPORT:
                            {
                                // At most one <transaction-support> element is allowed.
                                if (txSupportMatched) {
                                    throw new ParserException(bundle.unexpectedElement(TRANSACTION_SUPPORT.getXmlName()));
                                }
                                String value = rawElementText(reader);
                                TRANSACTION_SUPPORT.parseAndSetParameter(value, operation, reader);
                                ModelNode transactionSupport = parse(TRANSACTION_SUPPORT, value, reader);
                                String transactionSupportResolved = transactionSupport.resolve().asString();
                                // isXa feeds the connection-definition parsers above; this only works
                                // if <transaction-support> precedes them in the document.
                                isXa = value != null && TransactionSupportEnum.valueOf(transactionSupportResolved) == TransactionSupportEnum.XATransaction;
                                txSupportMatched = true;
                                break;
                            }
                        case WORKMANAGER:
                            {
                                parseWorkManager(operation, reader);
                                break;
                            }
                        case ARCHIVE:
                            {
                                // <archive> and <module> are mutually exclusive.
                                if (archiveOrModuleMatched) {
                                    throw new ParserException(bundle.unexpectedElement(ARCHIVE.getXmlName()));
                                }
                                archiveOrModuleName = rawElementText(reader);
                                ARCHIVE.parseAndSetParameter(archiveOrModuleName, operation, reader);
                                archiveOrModuleMatched = true;
                                break;
                            }
                        case MODULE:
                            {
                                if (archiveOrModuleMatched) {
                                    throw new ParserException(bundle.unexpectedElement(MODULE.getXmlName()));
                                }
                                String moduleId = rawAttributeText(reader, "id");
                                // "slot" defaults to "main" when absent.
                                String moduleSlot = rawAttributeText(reader, "slot", "main");
                                archiveOrModuleName = moduleId + ":" + moduleSlot;
                                MODULE.parseAndSetParameter(archiveOrModuleName, operation, reader);
                                archiveOrModuleMatched = true;
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Only reachable if the document ends before </resource-adapter> is seen.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
204435.8854143wildfly
/**
 * Marshals one resource-adapter connection-definition model node to XML.
 * Writes the {@code <connection-definition>} start element with its attributes,
 * then — in schema order — the optional pool (xa-pool or pool, plus capacity),
 * security, timeout, validation and recovery child elements, and finally the
 * closing end element. Element order follows the IronJacamar schema and must
 * not be changed.
 *
 * @param streamWriter target XML writer
 * @param conDef       model node holding the connection-definition attributes
 * @param poolName     value for the {@code pool-name} attribute (always written)
 * @param isXa         when {@code true}, emit {@code <xa-pool>} with its
 *                     XA-only children instead of a plain {@code <pool>}
 * @throws XMLStreamException on any underlying writer failure
 */
private void writeConDef(XMLExtendedStreamWriter streamWriter, ModelNode conDef, final String poolName, final boolean isXa) throws XMLStreamException {
    streamWriter.writeStartElement(Activation.Tag.CONNECTION_DEFINITION.getLocalName());
    // Attributes of <connection-definition>; each marshaller writes nothing when
    // its attribute is undefined on the model node.
    CLASS_NAME.marshallAsAttribute(conDef, streamWriter);
    JNDI_NAME.marshallAsAttribute(conDef, streamWriter);
    ENABLED.marshallAsAttribute(conDef, streamWriter);
    CONNECTABLE.marshallAsAttribute(conDef, streamWriter);
    TRACKING.marshallAsAttribute(conDef, streamWriter);
    USE_JAVA_CONTEXT.marshallAsAttribute(conDef, streamWriter);
    // pool-name comes from the caller (resource address), not from the model,
    // so it is written unconditionally.
    streamWriter.writeAttribute("pool-name", poolName);
    USE_CCM.marshallAsAttribute(conDef, streamWriter);
    SHARABLE.marshallAsAttribute(conDef, streamWriter);
    ENLISTMENT.marshallAsAttribute(conDef, streamWriter);
    MCP.marshallAsAttribute(conDef, streamWriter);
    ENLISTMENT_TRACE.marshallAsAttribute(conDef, streamWriter);
    // Nested <config-property> elements.
    writeNewConfigProperties(streamWriter, conDef);
    // A pool element is required when any sizing/behaviour attribute is set,
    // or when any capacity (in/de-crementer) attribute is set.
    boolean poolRequired = INITIAL_POOL_SIZE.isMarshallable(conDef) || MAX_POOL_SIZE.isMarshallable(conDef) || MIN_POOL_SIZE.isMarshallable(conDef) || POOL_USE_STRICT_MIN.isMarshallable(conDef) || POOL_PREFILL.isMarshallable(conDef) || POOL_FAIR.isMarshallable(conDef) || POOL_FLUSH_STRATEGY.isMarshallable(conDef);
    final boolean capacityRequired = CAPACITY_INCREMENTER_CLASS.isMarshallable(conDef) || CAPACITY_INCREMENTER_PROPERTIES.isMarshallable(conDef) || CAPACITY_DECREMENTER_CLASS.isMarshallable(conDef) || CAPACITY_DECREMENTER_PROPERTIES.isMarshallable(conDef);
    poolRequired = poolRequired || capacityRequired;
    if (poolRequired) {
        if (isXa) {
            // <xa-pool> carries the XA-only children: same-rm-override,
            // interleaving, no-tx-separate-pools, pad-xid, wrap-xa-resource.
            streamWriter.writeStartElement(ConnectionDefinition.Tag.XA_POOL.getLocalName());
            MIN_POOL_SIZE.marshallAsElement(conDef, streamWriter);
            INITIAL_POOL_SIZE.marshallAsElement(conDef, streamWriter);
            MAX_POOL_SIZE.marshallAsElement(conDef, streamWriter);
            POOL_PREFILL.marshallAsElement(conDef, streamWriter);
            POOL_FAIR.marshallAsElement(conDef, streamWriter);
            POOL_USE_STRICT_MIN.marshallAsElement(conDef, streamWriter);
            POOL_FLUSH_STRATEGY.marshallAsElement(conDef, streamWriter);
            SAME_RM_OVERRIDE.marshallAsElement(conDef, streamWriter);
            // A literal boolean true is serialized as an empty element
            // (<interleaving/>); anything else (undefined, false, expression)
            // falls back to the attribute's default element marshalling.
            if (conDef.hasDefined(INTERLEAVING.getName()) && conDef.get(INTERLEAVING.getName()).getType().equals(ModelType.BOOLEAN) && conDef.get(INTERLEAVING.getName()).asBoolean()) {
                streamWriter.writeEmptyElement(INTERLEAVING.getXmlName());
            } else {
                INTERLEAVING.marshallAsElement(conDef, streamWriter);
            }
            // Same empty-element convention as interleaving above.
            if (conDef.hasDefined(NOTXSEPARATEPOOL.getName()) && conDef.get(NOTXSEPARATEPOOL.getName()).getType().equals(ModelType.BOOLEAN) && conDef.get(NOTXSEPARATEPOOL.getName()).asBoolean()) {
                streamWriter.writeEmptyElement(NOTXSEPARATEPOOL.getXmlName());
            } else {
                NOTXSEPARATEPOOL.marshallAsElement(conDef, streamWriter);
            }
            PAD_XID.marshallAsElement(conDef, streamWriter);
            WRAP_XA_RESOURCE.marshallAsElement(conDef, streamWriter);
        } else {
            // Plain <pool> — same sizing children as xa-pool, minus the
            // XA-only elements (and minus pool-fair in this branch).
            streamWriter.writeStartElement(ConnectionDefinition.Tag.POOL.getLocalName());
            MIN_POOL_SIZE.marshallAsElement(conDef, streamWriter);
            INITIAL_POOL_SIZE.marshallAsElement(conDef, streamWriter);
            MAX_POOL_SIZE.marshallAsElement(conDef, streamWriter);
            POOL_PREFILL.marshallAsElement(conDef, streamWriter);
            POOL_USE_STRICT_MIN.marshallAsElement(conDef, streamWriter);
            POOL_FLUSH_STRATEGY.marshallAsElement(conDef, streamWriter);
        }
        // <capacity> with optional <incrementer>/<decrementer> children,
        // nested inside whichever pool element was opened above.
        if (capacityRequired) {
            streamWriter.writeStartElement(Pool.Tag.CAPACITY.getLocalName());
            if (conDef.hasDefined(CAPACITY_INCREMENTER_CLASS.getName())) {
                streamWriter.writeStartElement(Capacity.Tag.INCREMENTER.getLocalName());
                CAPACITY_INCREMENTER_CLASS.marshallAsAttribute(conDef, streamWriter);
                CAPACITY_INCREMENTER_PROPERTIES.marshallAsElement(conDef, streamWriter);
                streamWriter.writeEndElement();
            }
            if (conDef.hasDefined(CAPACITY_DECREMENTER_CLASS.getName())) {
                streamWriter.writeStartElement(Capacity.Tag.DECREMENTER.getLocalName());
                CAPACITY_DECREMENTER_CLASS.marshallAsAttribute(conDef, streamWriter);
                CAPACITY_DECREMENTER_PROPERTIES.marshallAsElement(conDef, streamWriter);
                streamWriter.writeEndElement();
            }
            streamWriter.writeEndElement();
        }
        // Closes <pool>/<xa-pool>.
        streamWriter.writeEndElement();
    }
    // NOTE(review): this guard omits AUTHENTICATION_CONTEXT(_AND_APPLICATION)
    // even though both are marshalled inside — a model with only those defined
    // would skip the <security> element entirely. Confirm whether intentional.
    if (conDef.hasDefined(APPLICATION.getName()) || conDef.hasDefined(SECURITY_DOMAIN.getName()) || conDef.hasDefined(SECURITY_DOMAIN_AND_APPLICATION.getName()) || conDef.hasDefined(ELYTRON_ENABLED.getName())) {
        streamWriter.writeStartElement(ConnectionDefinition.Tag.SECURITY.getLocalName());
        // application=true is serialized as an empty <application/> element.
        if (conDef.hasDefined(APPLICATION.getName()) && conDef.get(APPLICATION.getName()).getType().equals(ModelType.BOOLEAN) && conDef.get(APPLICATION.getName()).asBoolean()) {
            streamWriter.writeEmptyElement(APPLICATION.getXmlName());
        } else {
            APPLICATION.marshallAsElement(conDef, streamWriter);
        }
        SECURITY_DOMAIN.marshallAsElement(conDef, streamWriter);
        SECURITY_DOMAIN_AND_APPLICATION.marshallAsElement(conDef, streamWriter);
        ELYTRON_ENABLED.marshallAsElement(conDef, streamWriter);
        AUTHENTICATION_CONTEXT.marshallAsElement(conDef, streamWriter);
        AUTHENTICATION_CONTEXT_AND_APPLICATION.marshallAsElement(conDef, streamWriter);
        streamWriter.writeEndElement();
    }
    // <timeout> section.
    if (conDef.hasDefined(BLOCKING_TIMEOUT_WAIT_MILLIS.getName()) || conDef.hasDefined(IDLETIMEOUTMINUTES.getName()) || conDef.hasDefined(ALLOCATION_RETRY.getName()) || conDef.hasDefined(ALLOCATION_RETRY_WAIT_MILLIS.getName()) || conDef.hasDefined(XA_RESOURCE_TIMEOUT.getName())) {
        streamWriter.writeStartElement(ConnectionDefinition.Tag.TIMEOUT.getLocalName());
        BLOCKING_TIMEOUT_WAIT_MILLIS.marshallAsElement(conDef, streamWriter);
        IDLETIMEOUTMINUTES.marshallAsElement(conDef, streamWriter);
        ALLOCATION_RETRY.marshallAsElement(conDef, streamWriter);
        ALLOCATION_RETRY_WAIT_MILLIS.marshallAsElement(conDef, streamWriter);
        XA_RESOURCE_TIMEOUT.marshallAsElement(conDef, streamWriter);
        streamWriter.writeEndElement();
    }
    // <validation> section.
    if (conDef.hasDefined(BACKGROUNDVALIDATION.getName()) || conDef.hasDefined(BACKGROUNDVALIDATIONMILLIS.getName()) || conDef.hasDefined(USE_FAST_FAIL.getName()) || conDef.hasDefined(VALIDATE_ON_MATCH.getName())) {
        streamWriter.writeStartElement(ConnectionDefinition.Tag.VALIDATION.getLocalName());
        BACKGROUNDVALIDATION.marshallAsElement(conDef, streamWriter);
        BACKGROUNDVALIDATIONMILLIS.marshallAsElement(conDef, streamWriter);
        USE_FAST_FAIL.marshallAsElement(conDef, streamWriter);
        VALIDATE_ON_MATCH.marshallAsElement(conDef, streamWriter);
        streamWriter.writeEndElement();
    }
    // NOTE(review): the inner recover-credential guard below also checks
    // RECOVERY_CREDENTIAL_REFERENCE and RECOVERY_ELYTRON_ENABLED, but this
    // outer guard does not — a model with only those defined produces no
    // <recovery> element at all. Confirm whether that is intended.
    if (conDef.hasDefined(RECOVERY_USERNAME.getName()) || conDef.hasDefined(RECOVERY_PASSWORD.getName()) || conDef.hasDefined(RECOVERY_SECURITY_DOMAIN.getName()) || conDef.hasDefined(RECOVER_PLUGIN_CLASSNAME.getName()) || conDef.hasDefined(RECOVER_PLUGIN_PROPERTIES.getName()) || conDef.hasDefined(NO_RECOVERY.getName()) || conDef.hasDefined(ELYTRON_ENABLED.getName())) {
        streamWriter.writeStartElement(ConnectionDefinition.Tag.RECOVERY.getLocalName());
        NO_RECOVERY.marshallAsAttribute(conDef, streamWriter);
        if (conDef.hasDefined(RECOVERY_USERNAME.getName()) || conDef.hasDefined(RECOVERY_PASSWORD.getName()) || conDef.hasDefined(RECOVERY_CREDENTIAL_REFERENCE.getName()) || conDef.hasDefined(RECOVERY_SECURITY_DOMAIN.getName()) || conDef.hasDefined(RECOVERY_ELYTRON_ENABLED.getName())) {
            streamWriter.writeStartElement(Recovery.Tag.RECOVER_CREDENTIAL.getLocalName());
            RECOVERY_USERNAME.marshallAsAttribute(conDef, streamWriter);
            RECOVERY_PASSWORD.marshallAsAttribute(conDef, streamWriter);
            RECOVERY_CREDENTIAL_REFERENCE.marshallAsElement(conDef, streamWriter);
            RECOVERY_SECURITY_DOMAIN.marshallAsElement(conDef, streamWriter);
            RECOVERY_ELYTRON_ENABLED.marshallAsElement(conDef, streamWriter);
            RECOVERY_AUTHENTICATION_CONTEXT.marshallAsElement(conDef, streamWriter);
            streamWriter.writeEndElement();
        }
        // Optional <recover-plugin> with its config-property children.
        if (conDef.hasDefined(RECOVER_PLUGIN_CLASSNAME.getName()) || conDef.hasDefined(RECOVER_PLUGIN_PROPERTIES.getName())) {
            streamWriter.writeStartElement(Recovery.Tag.RECOVER_PLUGIN.getLocalName());
            RECOVER_PLUGIN_CLASSNAME.marshallAsAttribute(conDef, streamWriter);
            if (conDef.hasDefined(RECOVER_PLUGIN_PROPERTIES.getName())) {
                for (Property property : conDef.get(RECOVER_PLUGIN_PROPERTIES.getName()).asPropertyList()) {
                    writeProperty(streamWriter, conDef, property.getName(), property.getValue().asString(), org.jboss.jca.common.api.metadata.common.Extension.Tag.CONFIG_PROPERTY.getLocalName());
                }
            }
            streamWriter.writeEndElement();
        }
        streamWriter.writeEndElement();
    }
    // Closes <connection-definition>.
    streamWriter.writeEndElement();
}
215384.9423173cassandra
/**
 * Randomized SAI (storage-attached index) fuzz test: builds a table with SAI
 * indexes on all regular columns and one static column, replays randomized
 * writes/deletes against both the cluster and an in-JVM model (harry), and
 * periodically validates filtered SELECTs against the model's reconciled
 * partition state.
 *
 * Determinism note: correctness of a run depends on the exact sequence of
 * {@code random.next*} draws — do not reorder any of the calls below.
 */
public void basicSaiTest() {
    CassandraRelevantProperties.SAI_INTERSECTION_CLAUSE_LIMIT.setInt(6);
    // Schema: 3 partition-key, 3 clustering (ck2 reversed), 3 regular and
    // 1 static column; LCS to exercise compaction during the run.
    SchemaSpec schema = new SchemaSpec(KEYSPACE, "tbl1", Arrays.asList(ColumnSpec.ck("pk1", ColumnSpec.int64Type), ColumnSpec.ck("pk2", ColumnSpec.asciiType(4, 100)), ColumnSpec.ck("pk3", ColumnSpec.int64Type)), Arrays.asList(ColumnSpec.ck("ck1", ColumnSpec.asciiType(4, 100)), ColumnSpec.ck("ck2", ColumnSpec.asciiType, true), ColumnSpec.ck("ck3", ColumnSpec.int64Type)), Arrays.asList(ColumnSpec.regularColumn("v1", ColumnSpec.asciiType(40, 100)), ColumnSpec.regularColumn("v2", ColumnSpec.int64Type), ColumnSpec.regularColumn("v3", ColumnSpec.int64Type)), List.of(ColumnSpec.staticColumn("s1", ColumnSpec.asciiType(40, 100)))).withCompactionStrategy("LeveledCompactionStrategy");
    sut.schemaChange(schema.compile().cql());
    // Debug twin table with the same schema (no indexes) for investigation.
    sut.schemaChange(schema.cloneWithName(schema.keyspace, schema.table + "_debug").compile().cql());
    // One SAI index per regular column plus the static column.
    // NOTE(review): the first statement lacks the trailing ';' the other three
    // have — presumably harmless to the schema-change driver, but confirm.
    sut.schemaChange(String.format("CREATE INDEX %s_sai_idx ON %s.%s (%s) USING 'sai' ", schema.regularColumns.get(0).name, schema.keyspace, schema.table, schema.regularColumns.get(0).name));
    sut.schemaChange(String.format("CREATE INDEX %s_sai_idx ON %s.%s (%s) USING 'sai';", schema.regularColumns.get(1).name, schema.keyspace, schema.table, schema.regularColumns.get(1).name));
    sut.schemaChange(String.format("CREATE INDEX %s_sai_idx ON %s.%s (%s) USING 'sai';", schema.regularColumns.get(2).name, schema.keyspace, schema.table, schema.regularColumns.get(2).name));
    sut.schemaChange(String.format("CREATE INDEX %s_sai_idx ON %s.%s (%s) USING 'sai';", schema.staticColumns.get(0).name, schema.keyspace, schema.table, schema.staticColumns.get(0).name));
    waitForIndexesQueryable(schema);
    DataTracker tracker = new DefaultDataTracker();
    TokenPlacementModel.ReplicationFactor rf = new TokenPlacementModel.SimpleReplicationFactor(cluster.size());
    ReplayingHistoryBuilder history = new ReplayingHistoryBuilder(seed, MAX_PARTITION_SIZE, MAX_PARTITION_SIZE, tracker, sut, schema, rf, SystemUnderTest.ConsistencyLevel.QUORUM);
    for (int run = 0; run < RUNS; run++) {
        logger.info("Starting run {}/{}...", run + 1, RUNS);
        // Per-run entropy seeded by the run number so each run is reproducible.
        EntropySource random = new JdkRandomEntropySource(run);
        // Small pool of cell values so queries actually match written rows.
        long[] values = new long[UNIQUE_CELL_VALUES];
        for (int i = 0; i < values.length; i++) values[i] = random.next();
        for (int i = 0; i < OPERATIONS_PER_RUN; i++) {
            int partitionIndex = random.nextInt(0, NUM_PARTITIONS);
            // Insert a row; each regular/static cell is independently either
            // left unset or drawn from the shared value pool.
            history.visitPartition(partitionIndex).insert(random.nextInt(MAX_PARTITION_SIZE), new long[] { random.nextBoolean() ? DataGenerators.UNSET_DESCR : values[random.nextInt(values.length)], random.nextBoolean() ? DataGenerators.UNSET_DESCR : values[random.nextInt(values.length)], random.nextBoolean() ? DataGenerators.UNSET_DESCR : values[random.nextInt(values.length)] }, new long[] { random.nextBoolean() ? DataGenerators.UNSET_DESCR : values[random.nextInt(values.length)] });
            // Low-probability deletes of increasing destructiveness: row range,
            // row slice, column deletion, whole partition.
            if (random.nextFloat() > 0.99f) {
                int row1 = random.nextInt(MAX_PARTITION_SIZE);
                int row2 = random.nextInt(MAX_PARTITION_SIZE);
                history.visitPartition(partitionIndex).deleteRowRange(Math.min(row1, row2), Math.max(row1, row2), random.nextBoolean(), random.nextBoolean());
            } else if (random.nextFloat() > 0.999f) {
                history.visitPartition(partitionIndex).deleteRowSlice();
            }
            if (random.nextFloat() > 0.995f) {
                history.visitPartition(partitionIndex).deleteColumns();
            }
            if (random.nextFloat() > 0.9995f) {
                history.visitPartition(partitionIndex).deletePartition();
            }
            // Periodic maintenance; repair takes precedence over flush.
            if (i % REPAIR_SKIP == 0) {
                logger.debug("Repairing/flushing after operation {}...", i);
                repair(schema);
            } else if (i % FLUSH_SKIP == 0) {
                logger.debug("Flushing after operation {}...", i);
                flush(schema);
            }
            // Only validate every VALIDATION_SKIP-th operation.
            if (i % VALIDATION_SKIP != 0)
                continue;
            logger.debug("Validating partition at index {} after operation {} in run {}...", partitionIndex, i, run + 1);
            // Issue 10 randomized filtering queries against this partition.
            for (int j = 0; j < 10; j++) {
                List<Relation> relations = new ArrayList<>();
                int num = random.nextInt(1, 5);
                // Allowed relation kinds per regular column: v1 only EQ,
                // v2/v3 also GT/LT (kinds are consumed by pickKind).
                List<List<Relation.RelationKind>> pick = new ArrayList<>();
                pick.add(new ArrayList<>(Arrays.asList(Relation.RelationKind.EQ)));
                pick.add(new ArrayList<>(Arrays.asList(Relation.RelationKind.EQ, Relation.RelationKind.GT, Relation.RelationKind.LT)));
                pick.add(new ArrayList<>(Arrays.asList(Relation.RelationKind.EQ, Relation.RelationKind.GT, Relation.RelationKind.LT)));
                // Occasionally also constrain the clustering columns.
                if (random.nextFloat() > 0.75f) {
                    relations.addAll(Query.clusteringSliceQuery(schema, partitionIndex, random.next(), random.next(), random.nextBoolean(), random.nextBoolean(), false).relations);
                }
                for (int k = 0; k < num; k++) {
                    int column = random.nextInt(schema.regularColumns.size());
                    Relation.RelationKind relationKind = pickKind(random, pick, column);
                    if (relationKind != null)
                        relations.add(Relation.relation(relationKind, schema.regularColumns.get(column), values[random.nextInt(values.length)]));
                }
                // Occasionally constrain the static column too.
                if (random.nextFloat() > 0.7f) {
                    relations.add(Relation.relation(Relation.RelationKind.EQ, schema.staticColumns.get(0), values[random.nextInt(values.length)]));
                }
                long pd = history.pdSelector().pdAtPosition(partitionIndex);
                FilteringQuery query = new FilteringQuery(pd, false, relations, schema);
                Reconciler reconciler = new Reconciler(history.pdSelector(), schema, history::visitor);
                Set<ColumnSpec<?>> columns = new HashSet<>(schema.allColumns);
                // Model-side expected state for this partition and filter.
                PartitionState modelState = reconciler.inflatePartitionState(pd, tracker, query).filter(query);
                if (modelState.rows().size() > 0)
                    logger.debug("Model contains {} matching rows for query {}.", modelState.rows().size(), query);
                try {
                    // NOTE(review): executed twice on purpose, presumably to
                    // check that repeated reads agree — confirm intent.
                    QuiescentChecker.validate(schema, tracker, columns, modelState, SelectHelper.execute(sut, history.clock(), query), query);
                    QuiescentChecker.validate(schema, tracker, columns, modelState, SelectHelper.execute(sut, history.clock(), query), query);
                } catch (Throwable t) {
                    // On failure, first confirm the whole-partition read still
                    // agrees with the model, then rethrow the original error.
                    logger.debug("Partition index = {}, run = {}, j = {}, i = {}", partitionIndex, run, j, i);
                    Query partitionQuery = Query.selectPartition(schema, pd, false);
                    QuiescentChecker.validate(schema, tracker, columns, reconciler.inflatePartitionState(pd, tracker, partitionQuery), SelectHelper.execute(sut, history.clock(), partitionQuery), partitionQuery);
                    logger.debug("Partition state agrees. Throwing original error...");
                    throw t;
                }
            }
        }
        // Compact between runs (not after the last) to vary sstable layout.
        if (run + 1 < RUNS) {
            logger.debug("Forcing compaction at the end of run {}...", run + 1);
            compact(schema);
        }
    }
}
213929.119245elasticsearch
/**
 * Verifies rollover behaviour for a data stream with NO pre-existing
 * auto-sharding event: for every {@link AutoShardingType} the rollover result
 * must carry the expected target shard count, and exactly the expected
 * auto-sharding telemetry counter must be published (all others absent).
 *
 * Fix: the COOLDOWN_PREVENTED_INCREASE case previously declared a local
 * {@code autoShardingResult} and then built an identical duplicate inline in
 * the rollover call, leaving the local unused; the local is now passed.
 */
public void testRolloverDataStreamWithoutExistingAutosharding() throws Exception {
    String dataStreamName = "no_preexising_autoshard_event_ds";
    // Five backing indices; the trailing null means no DataStreamAutoShardingEvent.
    DataStream dataStream = DataStreamTestHelper.newInstance(dataStreamName, List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1), UUID.randomUUID().toString()), new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 2), UUID.randomUUID().toString()), new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 3), UUID.randomUUID().toString()), new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 4), UUID.randomUUID().toString()), new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 5), UUID.randomUUID().toString())), 5, null, false, null, (DataStreamAutoShardingEvent) null);
    // Template configures 3 shards, so that is the "no change" baseline below.
    ComposableIndexTemplate template = ComposableIndexTemplate.builder().indexPatterns(List.of(dataStreamName + "*")).dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()).template(new Template(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).build(), null, null)).build();
    Metadata.Builder builder = Metadata.builder();
    builder.put("template", template);
    for (Index index : dataStream.getIndices()) {
        builder.put(getIndexMetadataBuilderForIndex(index, 3));
    }
    builder.put(dataStream);
    final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metadata(builder).build();
    final TestTelemetryPlugin telemetryPlugin = new TestTelemetryPlugin();
    ThreadPool testThreadPool = new TestThreadPool(getTestName());
    try {
        MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService(dataStream, testThreadPool, Set.of(), xContentRegistry(), telemetryPlugin.getTelemetryProvider(Settings.EMPTY));
        for (AutoShardingType type : AutoShardingType.values()) {
            // Reset metrics so each case only sees its own telemetry.
            telemetryPlugin.resetMeter();
            long before = testThreadPool.absoluteTimeInMillis();
            switch(type) {
                case INCREASE_SHARDS ->
                    {
                        List<Condition<?>> metConditions = List.of(new OptimalShardCountCondition(5));
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(INCREASE_SHARDS, 3, 5, TimeValue.ZERO, 64.33), false);
                        // 3 -> 5 shards; only the increase counter must fire.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 5);
                        assertTelemetry(telemetryPlugin, "es.auto_sharding.increase_shards.total", List.of("es.auto_sharding.decrease_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
                case DECREASE_SHARDS ->
                    {
                        // First sub-case: decrease together with another met condition.
                        {
                            List<Condition<?>> metConditions = List.of(new MaxDocsCondition(2L), new OptimalShardCountCondition(1));
                            MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33), false);
                            assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 1);
                            assertTelemetry(telemetryPlugin, "es.auto_sharding.decrease_shards.total", List.of("es.auto_sharding.increase_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                        }
                        // Second sub-case: decrease driven by the shard condition alone.
                        {
                            List<Condition<?>> metConditions = List.of(new OptimalShardCountCondition(1));
                            MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33), false);
                            assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 1);
                        }
                    }
                case COOLDOWN_PREVENTED_INCREASE ->
                    {
                        // Pass the prepared result instead of constructing an
                        // identical duplicate inline (was an unused local).
                        AutoShardingResult autoShardingResult = new AutoShardingResult(COOLDOWN_PREVENTED_INCREASE, 3, 5, TimeValue.timeValueMinutes(10), 64.33);
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), List.of(), Instant.now(), randomBoolean(), false, null, autoShardingResult, false);
                        // Cooldown blocks the increase: shard count stays at 3.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3);
                        assertTelemetry(telemetryPlugin, "es.auto_sharding.cooldown_prevented_increase.total", List.of("es.auto_sharding.increase_shards.total", "es.auto_sharding.decrease_shards.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
                case COOLDOWN_PREVENTED_DECREASE ->
                    {
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), List.of(), Instant.now(), randomBoolean(), false, null, new AutoShardingResult(COOLDOWN_PREVENTED_DECREASE, 3, 1, TimeValue.timeValueMinutes(10), 64.33), false);
                        // Cooldown blocks the decrease: shard count stays at 3.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3);
                        assertTelemetry(telemetryPlugin, "es.auto_sharding.cooldown_prevented_decrease.total", List.of("es.auto_sharding.increase_shards.total", "es.auto_sharding.decrease_shards.total", "es.auto_sharding.cooldown_prevented_increase.total"));
                    }
                case NO_CHANGE_REQUIRED ->
                    {
                        List<Condition<?>> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong()));
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(NO_CHANGE_REQUIRED, 3, 3, TimeValue.ZERO, 2.33), false);
                        // No auto-sharding telemetry at all for a no-op decision.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 3);
                        assertTelemetry(telemetryPlugin, null, List.of("es.auto_sharding.increase_shards.total", "es.auto_sharding.decrease_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
                case NOT_APPLICABLE ->
                    {
                        List<Condition<?>> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong()));
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(NOT_APPLICABLE, 1, 1, TimeValue.MAX_VALUE, null), false);
                        // Shard count falls back to the template's 3 shards.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 3);
                        assertTelemetry(telemetryPlugin, null, List.of("es.auto_sharding.increase_shards.total", "es.auto_sharding.decrease_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
            }
        }
    } finally {
        testThreadPool.shutdown();
    }
}
213876.3410242elasticsearch
/**
 * Verifies rollover behaviour for a data stream that ALREADY carries an
 * auto-sharding event (triggered by its third backing index): for every
 * {@link AutoShardingType} the rollover result must carry the expected target
 * shard count, and exactly the expected auto-sharding telemetry counter must
 * be published (all others absent).
 */
public void testRolloverDataStreamWithExistingAutoShardEvent() throws Exception {
    String dataStreamName = "ds_with_existing_autoshard_event";
    String autoShardEventTriggerIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 3);
    // Five backing indices; index 3 is recorded as the auto-shard event source.
    DataStream dataStream = DataStreamTestHelper.newInstance(dataStreamName, List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1), UUID.randomUUID().toString()), new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 2), UUID.randomUUID().toString()), new Index(autoShardEventTriggerIndex, UUID.randomUUID().toString()), new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 4), UUID.randomUUID().toString()), new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 5), UUID.randomUUID().toString())), 5, null, false, null, new DataStreamAutoShardingEvent(autoShardEventTriggerIndex, 3, System.currentTimeMillis()));
    // Unlike the no-event sibling test, the template sets no shard count.
    ComposableIndexTemplate template = ComposableIndexTemplate.builder().indexPatterns(List.of(dataStreamName + "*")).dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()).build();
    Metadata.Builder builder = Metadata.builder();
    builder.put("template", template);
    int numberOfShards = 1;
    // Indices 1-2 get 1 shard; from the trigger index onward (3, 4, 5) the
    // count stays at 3, modelling the state after the auto-shard event.
    for (Index index : dataStream.getIndices()) {
        if (index.getName().equals(autoShardEventTriggerIndex)) {
            numberOfShards = 3;
        }
        builder.put(getIndexMetadataBuilderForIndex(index, numberOfShards));
    }
    builder.put(dataStream);
    final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metadata(builder).build();
    final TestTelemetryPlugin telemetryPlugin = new TestTelemetryPlugin();
    ThreadPool testThreadPool = new TestThreadPool(getTestName());
    try {
        MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService(dataStream, testThreadPool, Set.of(), xContentRegistry(), telemetryPlugin.getTelemetryProvider(Settings.EMPTY));
        for (AutoShardingType type : AutoShardingType.values()) {
            // Reset metrics so each case only sees its own telemetry.
            telemetryPlugin.resetMeter();
            long before = testThreadPool.absoluteTimeInMillis();
            switch(type) {
                case INCREASE_SHARDS ->
                    {
                        List<Condition<?>> metConditions = List.of(new OptimalShardCountCondition(3));
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(INCREASE_SHARDS, 3, 5, TimeValue.ZERO, 64.33), false);
                        // 3 -> 5 shards; only the increase counter must fire.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 5);
                        assertTelemetry(telemetryPlugin, "es.auto_sharding.increase_shards.total", List.of("es.auto_sharding.decrease_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
                case DECREASE_SHARDS ->
                    {
                        // First sub-case: decrease together with another met condition.
                        {
                            List<Condition<?>> metConditions = List.of(new MaxDocsCondition(2L), new OptimalShardCountCondition(1));
                            MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33), false);
                            assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 1);
                            assertTelemetry(telemetryPlugin, "es.auto_sharding.decrease_shards.total", List.of("es.auto_sharding.increase_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                        }
                        // Second sub-case: decrease driven by the shard condition alone.
                        {
                            List<Condition<?>> metConditions = List.of(new OptimalShardCountCondition(1));
                            MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(DECREASE_SHARDS, 3, 1, TimeValue.ZERO, 0.33), false);
                            assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 1);
                        }
                    }
                case COOLDOWN_PREVENTED_INCREASE ->
                    {
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), List.of(), Instant.now(), randomBoolean(), false, null, new AutoShardingResult(COOLDOWN_PREVENTED_INCREASE, 3, 5, TimeValue.timeValueMinutes(10), 64.33), false);
                        // Cooldown blocks the increase: shard count stays at 3.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3);
                        assertTelemetry(telemetryPlugin, "es.auto_sharding.cooldown_prevented_increase.total", List.of("es.auto_sharding.decrease_shards.total", "es.auto_sharding.increase_shards.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
                case COOLDOWN_PREVENTED_DECREASE ->
                    {
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), List.of(), Instant.now(), randomBoolean(), false, null, new AutoShardingResult(COOLDOWN_PREVENTED_DECREASE, 3, 1, TimeValue.timeValueMinutes(10), 64.33), false);
                        // Cooldown blocks the decrease: shard count stays at 3.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), List.of(), 3);
                        assertTelemetry(telemetryPlugin, "es.auto_sharding.cooldown_prevented_decrease.total", List.of("es.auto_sharding.increase_shards.total", "es.auto_sharding.decrease_shards.total", "es.auto_sharding.cooldown_prevented_increase.total"));
                    }
                case NO_CHANGE_REQUIRED ->
                    {
                        List<Condition<?>> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong()));
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(NO_CHANGE_REQUIRED, 3, 3, TimeValue.ZERO, 2.33), false);
                        // No auto-sharding telemetry at all for a no-op decision.
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 3);
                        assertTelemetry(telemetryPlugin, null, List.of("es.auto_sharding.decrease_shards.total", "es.auto_sharding.increase_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
                case NOT_APPLICABLE ->
                    {
                        List<Condition<?>> metConditions = List.of(new MaxDocsCondition(randomNonNegativeLong()));
                        MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState(clusterState, dataStream.getName(), null, new CreateIndexRequest("_na_"), metConditions, Instant.now(), randomBoolean(), false, null, new AutoShardingResult(NOT_APPLICABLE, 1, 1, TimeValue.MAX_VALUE, null), false);
                        // With no template shard setting, the existing event's
                        // count does not apply here: expect 1 shard (contrast
                        // with the sibling test, which expects 3).
                        assertRolloverResult(dataStream, rolloverResult, before, testThreadPool.absoluteTimeInMillis(), metConditions, 1);
                        assertTelemetry(telemetryPlugin, null, List.of("es.auto_sharding.decrease_shards.total", "es.auto_sharding.increase_shards.total", "es.auto_sharding.cooldown_prevented_increase.total", "es.auto_sharding.cooldown_prevented_decrease.total"));
                    }
            }
        }
    } finally {
        testThreadPool.shutdown();
    }
}
218507.451207elasticsearch
/**
 * Exercises every combination of the {@code IncludeExclude} filter (regex include, regex exclude,
 * exact-term include, exact-term exclude, and mixes thereof) against a keyword terms aggregation,
 * on both a multi-valued and a single-valued field.
 */
public void testStringIncludeExclude() throws Exception {
    MappedFieldType ft1 = new KeywordFieldMapper.KeywordFieldType("mv_field", randomBoolean(), true, Collections.emptyMap());
    MappedFieldType ft2 = new KeywordFieldMapper.KeywordFieldType("sv_field", randomBoolean(), true, Collections.emptyMap());
    // Six documents; each distinct term below lands in exactly one document, so every surviving bucket has doc count 1.
    CheckedConsumer<RandomIndexWriter, IOException> buildIndex = iw -> {
        iw.addDocument(doc(ft1, ft2, "val000", "val001", "val001"));
        iw.addDocument(doc(ft1, ft2, "val002", "val003", "val003"));
        iw.addDocument(doc(ft1, ft2, "val004", "val005", "val005"));
        iw.addDocument(doc(ft1, ft2, "val006", "val007", "val007"));
        iw.addDocument(doc(ft1, ft2, "val008", "val009", "val009"));
        iw.addDocument(doc(ft1, ft2, "val010", "val011", "val011"));
    };
    String executionHint = randomFrom(TermsAggregatorFactory.ExecutionMode.values()).toString();

    // Regex include on the multi-valued field keeps val000..val009.
    AggregationBuilder builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(new IncludeExclude("val00.+", null, null, null))
        .field("mv_field")
        .size(12)
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(
            terms,
            "val000",
            "val001",
            "val002",
            "val003",
            "val004",
            "val005",
            "val006",
            "val007",
            "val008",
            "val009"
        ),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Same regex include against the single-valued field keeps only the odd-numbered terms.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(new IncludeExclude("val00.+", null, null, null))
        .field("sv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(terms, "val001", "val003", "val005", "val007", "val009"),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Repeated with a freshly built (identical) aggregation, mirroring the original duplicate check.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(new IncludeExclude("val00.+", null, null, null))
        .field("sv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(terms, "val001", "val003", "val005", "val007", "val009"),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Regex include combined with a regex exclude: val000/val001 are filtered back out.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(new IncludeExclude("val00.+", "(val000|val001)", null, null))
        .field("mv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(
            terms,
            "val002",
            "val003",
            "val004",
            "val005",
            "val006",
            "val007",
            "val008",
            "val009"
        ),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Regex exclude only: everything except val00.* remains.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(new IncludeExclude(null, "val00.+", null, null))
        .field("mv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(terms, "val010", "val011"),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Exact-terms include.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(new IncludeExclude(null, null, new TreeSet<>(Set.of(new BytesRef("val000"), new BytesRef("val010"))), null))
        .field("mv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(terms, "val000", "val010"),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Exact-terms exclude: everything but val000 and val010 is listed for exclusion.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(
            new IncludeExclude(
                null,
                null,
                null,
                new TreeSet<>(
                    Set.of(
                        new BytesRef("val001"),
                        new BytesRef("val002"),
                        new BytesRef("val003"),
                        new BytesRef("val004"),
                        new BytesRef("val005"),
                        new BytesRef("val006"),
                        new BytesRef("val007"),
                        new BytesRef("val008"),
                        new BytesRef("val009"),
                        new BytesRef("val011")
                    )
                )
            )
        )
        .field("mv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(terms, "val000", "val010"),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Regex include combined with exact-terms exclude.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(
            new IncludeExclude(
                "val00.+",
                null,
                null,
                new TreeSet<>(
                    Set.of(
                        new BytesRef("val001"),
                        new BytesRef("val002"),
                        new BytesRef("val003"),
                        new BytesRef("val004"),
                        new BytesRef("val005"),
                        new BytesRef("val006"),
                        new BytesRef("val007"),
                        new BytesRef("val008")
                    )
                )
            )
        )
        .field("mv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(terms, "val000", "val009"),
        new AggTestConfig(builder, ft1, ft2)
    );

    // Exact-terms include combined with regex exclude: val010 is included but excluded by the regex.
    builder = new TermsAggregationBuilder("_name").executionHint(executionHint)
        .includeExclude(
            new IncludeExclude(
                null,
                "val01.+",
                new TreeSet<>(Set.of(new BytesRef("val001"), new BytesRef("val002"), new BytesRef("val010"))),
                null
            )
        )
        .field("mv_field")
        .order(BucketOrder.key(true));
    testCase(
        buildIndex,
        (StringTerms terms) -> assertSingletonBuckets(terms, "val001", "val002"),
        new AggTestConfig(builder, ft1, ft2)
    );
}

/**
 * Asserts that {@code terms} contains exactly the given bucket keys, in order, each with a doc count of 1,
 * and that the aggregation reports a value.
 */
private static void assertSingletonBuckets(StringTerms terms, String... expectedKeys) {
    assertEquals(expectedKeys.length, terms.getBuckets().size());
    for (int i = 0; i < expectedKeys.length; i++) {
        assertEquals(expectedKeys[i], terms.getBuckets().get(i).getKeyAsString());
        assertEquals(1L, terms.getBuckets().get(i).getDocCount());
    }
    assertTrue(AggregationInspectionHelper.hasValue(terms));
}
213865.631291elasticsearch
/**
 * Builds a map from an expected validation error message to an encoded {@code Authentication}
 * (produced by {@code encodeAuthentication}) that should trigger that message. Each entry pairs a
 * Subject / AuthenticationType combination that violates one of the invariants checked for
 * anonymous, internal, API key, cross-cluster access, realm, token and service-account
 * authentications (e.g. wrong realm type, unexpected domain, internal user, roles on an API key
 * user, missing metadata, or an illegal run-as).
 *
 * @return map of expected error message to the corresponding encoded authentication
 * @throws IOException if encoding an authentication fails
 */
private Map<String, String> getErrorMessageToEncodedAuthentication() throws IOException {
    // Plain users and realm refs reused across the invalid combinations below.
    final User userFoo = new User("foo");
    final User userBar = new User("bar");
    final Authentication.RealmRef realm1 = new Authentication.RealmRef("realm_1", "realm_1", "node");
    final Authentication.RealmRef realm2 = new Authentication.RealmRef("realm_2", "realm_2", "node");
    // One entry per invariant violation; the key is the exact error message expected from validation.
    return Map.ofEntries(entry("Anonymous authentication cannot have realm type [realm_1]", encodeAuthentication(new Subject(userFoo, realm1), Authentication.AuthenticationType.ANONYMOUS)), entry("Anonymous authentication cannot have domain", encodeAuthentication(new Subject(userFoo, new Authentication.RealmRef(AuthenticationField.ANONYMOUS_REALM_NAME, AuthenticationField.ANONYMOUS_REALM_TYPE, "node", new RealmDomain("domain1", Set.of(new RealmConfig.RealmIdentifier(AuthenticationField.ANONYMOUS_REALM_TYPE, AuthenticationField.ANONYMOUS_REALM_NAME))))), Authentication.AuthenticationType.ANONYMOUS)), entry("Anonymous authentication cannot have internal user [_xpack]", encodeAuthentication(new Subject(InternalUsers.XPACK_USER, Authentication.RealmRef.newAnonymousRealmRef("node")), Authentication.AuthenticationType.ANONYMOUS)), entry("Anonymous authentication cannot run-as other user", encodeAuthentication(new Subject(userBar, realm2), new Subject(userFoo, Authentication.RealmRef.newAnonymousRealmRef("node")), Authentication.AuthenticationType.ANONYMOUS)), entry("Internal authentication cannot have realm type [realm_1]", encodeAuthentication(new Subject(userFoo, realm1), Authentication.AuthenticationType.INTERNAL)), entry("Internal authentication cannot have domain", encodeAuthentication(new Subject(userFoo, new Authentication.RealmRef(AuthenticationField.FALLBACK_REALM_NAME, AuthenticationField.FALLBACK_REALM_TYPE, "node", new RealmDomain("domain1", Set.of(new RealmConfig.RealmIdentifier(AuthenticationField.FALLBACK_REALM_TYPE, AuthenticationField.FALLBACK_REALM_NAME))))), Authentication.AuthenticationType.INTERNAL)), entry("Internal authentication must have internal user", encodeAuthentication(new Subject(userFoo, Authentication.RealmRef.newInternalAttachRealmRef("node")), Authentication.AuthenticationType.INTERNAL)), entry("API key authentication cannot have realm type [realm_1]", encodeAuthentication(new Subject(userFoo, realm1), 
Authentication.AuthenticationType.API_KEY)), entry("API key authentication cannot have domain", encodeAuthentication(new Subject(userFoo, new Authentication.RealmRef(AuthenticationField.API_KEY_REALM_NAME, AuthenticationField.API_KEY_REALM_TYPE, "node", new RealmDomain("domain1", Set.of(new RealmConfig.RealmIdentifier(AuthenticationField.API_KEY_REALM_TYPE, AuthenticationField.API_KEY_REALM_NAME))))), Authentication.AuthenticationType.API_KEY)), entry("API key authentication cannot have internal user [_xpack]", encodeAuthentication(new Subject(InternalUsers.XPACK_USER, Authentication.RealmRef.newApiKeyRealmRef("node")), Authentication.AuthenticationType.API_KEY)), entry("API key authentication user must have no role", encodeAuthentication(new Subject(new User("foo", "role"), Authentication.RealmRef.newApiKeyRealmRef("node")), Authentication.AuthenticationType.API_KEY)), entry("API key authentication requires metadata to contain a non-null API key ID", encodeAuthentication(new Subject(userFoo, Authentication.RealmRef.newApiKeyRealmRef("node")), Authentication.AuthenticationType.API_KEY)), entry("Cross cluster access authentication requires metadata to contain " + "a non-null serialized cross cluster access authentication field", encodeAuthentication(new Subject(userFoo, Authentication.RealmRef.newCrossClusterAccessRealmRef("node"), TransportVersion.current(), Map.of(AuthenticationField.API_KEY_ID_KEY, "abc")), Authentication.AuthenticationType.API_KEY)), entry("Cross cluster access authentication cannot contain another cross cluster access authentication in its metadata", encodeAuthentication(new Subject(userFoo, Authentication.RealmRef.newCrossClusterAccessRealmRef("node"), TransportVersion.current(), Map.of(AuthenticationField.API_KEY_ID_KEY, "abc", AuthenticationField.CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY, AuthenticationTestHelper.builder().crossClusterAccess().build())), Authentication.AuthenticationType.API_KEY)), entry("Cross cluster access authentication 
requires metadata to contain " + "a non-null serialized cross cluster access role descriptors field", encodeAuthentication(new Subject(userFoo, Authentication.RealmRef.newCrossClusterAccessRealmRef("node"), TransportVersion.current(), Map.of(AuthenticationField.API_KEY_ID_KEY, "abc", AuthenticationField.CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY, Authentication.newRealmAuthentication(userBar, realm2))), Authentication.AuthenticationType.API_KEY)), entry("Cross cluster access authentication cannot run-as other user", encodeAuthentication(new Subject(userBar, realm2), new Subject(userFoo, Authentication.RealmRef.newCrossClusterAccessRealmRef("node"), TransportVersion.current(), Map.of(AuthenticationField.API_KEY_ID_KEY, "abc", AuthenticationField.CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY, Authentication.newRealmAuthentication(userBar, realm2), AuthenticationField.CROSS_CLUSTER_ACCESS_ROLE_DESCRIPTORS_KEY, List.of())), Authentication.AuthenticationType.API_KEY)), entry("Realm authentication must have subject type of user", encodeAuthentication(new Subject(userFoo, Authentication.RealmRef.newApiKeyRealmRef("node")), Authentication.AuthenticationType.REALM)), entry("Token authentication cannot have internal user [_xpack]", encodeAuthentication(new Subject(InternalUsers.XPACK_USER, realm1), Authentication.AuthenticationType.TOKEN)), entry("Service account authentication cannot have domain", encodeAuthentication(new Subject(userFoo, new Authentication.RealmRef(ServiceAccountSettings.REALM_NAME, ServiceAccountSettings.REALM_TYPE, "node", new RealmDomain("domain1", Set.of(new RealmConfig.RealmIdentifier(ServiceAccountSettings.REALM_TYPE, ServiceAccountSettings.REALM_NAME))))), Authentication.AuthenticationType.TOKEN)), entry("Service account authentication user must have no role", encodeAuthentication(new Subject(new User("foo", "role"), Authentication.RealmRef.newServiceAccountRealmRef("node")), Authentication.AuthenticationType.TOKEN)), entry("Service account authentication 
cannot run-as other user", encodeAuthentication(new Subject(userBar, realm2), new Subject(userFoo, Authentication.RealmRef.newServiceAccountRealmRef("node")), Authentication.AuthenticationType.TOKEN)), entry("API key token authentication cannot have domain", encodeAuthentication(new Subject(userFoo, new Authentication.RealmRef(AuthenticationField.API_KEY_REALM_NAME, AuthenticationField.API_KEY_REALM_TYPE, "node", new RealmDomain("domain1", Set.of(new RealmConfig.RealmIdentifier(AuthenticationField.API_KEY_REALM_TYPE, AuthenticationField.API_KEY_REALM_NAME))))), Authentication.AuthenticationType.TOKEN)), entry("API key token authentication user must have no role", encodeAuthentication(new Subject(new User("foo", "role"), Authentication.RealmRef.newApiKeyRealmRef("node")), Authentication.AuthenticationType.TOKEN)), entry("API key token authentication requires metadata to contain a non-null API key ID", encodeAuthentication(new Subject(userFoo, Authentication.RealmRef.newApiKeyRealmRef("node")), Authentication.AuthenticationType.TOKEN)), entry("Run-as subject type cannot be [API_KEY]", encodeAuthentication(new Subject(userBar, Authentication.RealmRef.newApiKeyRealmRef("node")), new Subject(userFoo, realm1), Authentication.AuthenticationType.REALM)));
}
216822.079216elasticsearch
/**
 * Indexes ten documents carrying a mix of keyword, numeric (int/float) and IP fields, runs the
 * frequent item sets aggregation with randomly chosen exclude filters on the single-valued
 * fields, and checks the mined item sets via {@code assertResults} (which also accounts for the
 * randomly excluded values).
 */
public void testMixedSingleValues() throws IOException {
    List<MultiValuesSourceFieldConfig> fields = new ArrayList<>();
    // Randomly pick at most one keyword/IP value and at most one int value to exclude.
    String stringExclude = randomBoolean() ? randomFrom("host-2", "192.168.0.1", "client-2", "127.0.0.1") : null;
    Integer intExclude = randomBoolean() ? randomIntBetween(0, 10) : null;
    // The same string exclude is applied to all three keyword fields (only one of them can actually contain it).
    fields.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD1).setIncludeExclude(stringExclude != null ? new IncludeExclude(null, null, null, new TreeSet<>(Set.of(new BytesRef(stringExclude)))) : null).build());
    fields.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD2).setIncludeExclude(stringExclude != null ? new IncludeExclude(null, null, null, new TreeSet<>(Set.of(new BytesRef(stringExclude)))) : null).build());
    fields.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD3).setIncludeExclude(stringExclude != null ? new IncludeExclude(null, null, null, new TreeSet<>(Set.of(new BytesRef(stringExclude)))) : null).build());
    fields.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(FLOAT_FIELD).build());
    fields.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(INT_FIELD).setIncludeExclude(intExclude != null ? new IncludeExclude(null, null, null, new TreeSet<>(Set.of(new BytesRef(String.valueOf(intExclude))))) : null).build());
    // Only candidates starting with "1" (the two IP-shaped values) are applied to the IP field.
    // Flattened from the original's nested ternary (a != null ? a.startsWith("1") ? X : null : null).
    fields.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(IP_FIELD).setIncludeExclude(stringExclude != null && stringExclude.startsWith("1") ? new IncludeExclude(null, null, null, new TreeSet<>(Set.of(new BytesRef(stringExclude)))) : null).build());
    double minimumSupport = randomDoubleBetween(0.13, 0.51, true);
    int minimumSetSize = randomIntBetween(2, 6);
    int size = randomIntBetween(1, 100);
    Query query = new MatchAllDocsQuery();
    MappedFieldType keywordType1 = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD1);
    MappedFieldType keywordType2 = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD2);
    MappedFieldType keywordType3 = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD3);
    MappedFieldType intType = new NumberFieldMapper.NumberFieldType(INT_FIELD, NumberFieldMapper.NumberType.INTEGER);
    MappedFieldType floatType = new NumberFieldMapper.NumberFieldType(FLOAT_FIELD, NumberFieldMapper.NumberType.FLOAT);
    MappedFieldType ipType = new IpFieldMapper.IpFieldType(IP_FIELD);
    // Expected item sets for the full corpus; assertResults is handed the exclude settings so it can
    // reconcile these against what the randomly filtered run actually produces.
    List<FrequentItemSet> expectedResults = List.of(new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD3, List.of("type-1")), 5, 0.5), new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-2"), KEYWORD_FIELD3, List.of("type-2")), 3, 0.3), new FrequentItemSet(Map.of(FLOAT_FIELD, List.of(4.1f), KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD2, List.of("client-1"), KEYWORD_FIELD3, List.of("type-1")), 2, 0.2), new FrequentItemSet(Map.of(IP_FIELD, List.of("192.168.0.5"), KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD3, List.of("type-1")), 2, 0.2), new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD2, List.of("client-2"), KEYWORD_FIELD3, List.of("type-1")), 2, 0.2), new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-2"), KEYWORD_FIELD2, List.of("client-2"), KEYWORD_FIELD3, List.of("type-3")), 2, 0.2), new FrequentItemSet(Map.of(IP_FIELD, List.of("192.168.0.1"), FLOAT_FIELD, List.of(4.1f), INT_FIELD, List.of(2)), 2, 0.2), new FrequentItemSet(Map.of(FLOAT_FIELD, List.of(5.0f), KEYWORD_FIELD1, List.of("host-2")), 2, 0.2), new FrequentItemSet(Map.of(INT_FIELD, List.of(5), KEYWORD_FIELD1, List.of("host-2")), 2, 0.2), new FrequentItemSet(Map.of(FLOAT_FIELD, List.of(5.0f), KEYWORD_FIELD2, List.of("client-2")), 2, 0.2), new FrequentItemSet(Map.of(IP_FIELD, List.of("192.168.0.5"), KEYWORD_FIELD2, List.of("client-2")), 2, 0.2));
    FrequentItemSetsAggregationBuilder builder = new FrequentItemSetsAggregationBuilder("fi", fields, minimumSupport, minimumSetSize, size, null, randomFrom(EXECUTION_HINT_ALLOWED_MODES));
    testCase(iw -> {
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-1")), new NumericDocValuesField(INT_FIELD, 2), new FloatDocValuesField(FLOAT_FIELD, 4.1f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.1")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-1")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-1"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-2")), new NumericDocValuesField(INT_FIELD, 5), new FloatDocValuesField(FLOAT_FIELD, 3.0f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.4")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-1")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-2"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-1")), new NumericDocValuesField(INT_FIELD, 7), new FloatDocValuesField(FLOAT_FIELD, 5.0f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.4")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-2")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-1"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-2")), new NumericDocValuesField(INT_FIELD, 1), new FloatDocValuesField(FLOAT_FIELD, 5.2f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.22")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-3")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-2"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-1")), new NumericDocValuesField(INT_FIELD, 1), new FloatDocValuesField(FLOAT_FIELD, 14.0f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.12")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-5")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-1"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-2")), new NumericDocValuesField(INT_FIELD, 2), new FloatDocValuesField(FLOAT_FIELD, 4.1f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.1")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-2")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-3"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-2")), new NumericDocValuesField(INT_FIELD, 5), new FloatDocValuesField(FLOAT_FIELD, 5.0f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.5")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-2")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-3"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-1")), new NumericDocValuesField(INT_FIELD, 4), new FloatDocValuesField(FLOAT_FIELD, 4.1f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.5")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-1")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-1"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-1")), new NumericDocValuesField(INT_FIELD, 6), new FloatDocValuesField(FLOAT_FIELD, 7.0f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.5")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-2")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-1"))));
        iw.addDocument(List.of(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef("host-2")), new NumericDocValuesField(INT_FIELD, 15), new FloatDocValuesField(FLOAT_FIELD, 25.0f), new SortedSetDocValuesField(IP_FIELD, encodeIp("192.168.0.15")), new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef("client-8")), new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef("type-2"))));
    }, (InternalItemSetMapReduceAggregation<?, ?, ?, EclatResult> results) -> {
        assertNotNull(results);
        assertResults(expectedResults, results.getMapReduceResult().getFrequentItemSets(), minimumSupport, minimumSetSize, size, stringExclude, intExclude);
    }, new AggTestConfig(builder, keywordType1, keywordType2, keywordType3, intType, floatType, ipType).withQuery(query));
}
215251.441242elasticsearch
/**
 * Wires up a full set of mocks: three anomaly-detection jobs (two opened, one closed) with stats,
 * three datafeeds, three data frame analytics configs with stats, and four trained models with
 * size/ingest/assignment stats.
 *
 * @return the expected number of data frame analytics configs per analysis name, for the caller
 *         to verify against the collected usage
 */
private Map<String, Integer> setupComplexMocks() {
    // Anomaly detection jobs and their per-job stats.
    Job opened1 = buildJob("opened1", Collections.singletonList(buildMinDetector("foo")), Collections.singletonMap("created_by", randomFrom("a-cool-module", "a_cool_module", "a cool module")));
    GetJobsStatsAction.Response.JobStats opened1JobStats = buildJobStats("opened1", JobState.OPENED, 100L, 3L);
    Job opened2 = buildJob("opened2", Arrays.asList(buildMinDetector("foo"), buildMinDetector("bar")));
    GetJobsStatsAction.Response.JobStats opened2JobStats = buildJobStats("opened2", JobState.OPENED, 200L, 8L);
    Job closed1 = buildJob("closed1", Arrays.asList(buildMinDetector("foo"), buildMinDetector("bar"), buildMinDetector("foobar")));
    GetJobsStatsAction.Response.JobStats closed1JobStats = buildJobStats("closed1", JobState.CLOSED, 300L, 0);
    givenJobs(Arrays.asList(opened1, opened2, closed1), Arrays.asList(opened1JobStats, opened2JobStats, closed1JobStats));
    givenDatafeeds(Arrays.asList(buildDatafeedStats(DatafeedState.STARTED), buildDatafeedStats(DatafeedState.STARTED), buildDatafeedStats(DatafeedState.STOPPED)));
    // Data frame analytics configs with random analysis types.
    DataFrameAnalyticsConfig dfa1 = DataFrameAnalyticsConfigTests.createRandom("dfa_1");
    DataFrameAnalyticsConfig dfa2 = DataFrameAnalyticsConfigTests.createRandom("dfa_2");
    DataFrameAnalyticsConfig dfa3 = DataFrameAnalyticsConfigTests.createRandom("dfa_3");
    List<DataFrameAnalyticsConfig> dataFrameAnalytics = Arrays.asList(dfa1, dfa2, dfa3);
    givenDataFrameAnalytics(dataFrameAnalytics, Arrays.asList(buildDataFrameAnalyticsStats(dfa1.getId(), DataFrameAnalyticsState.STOPPED, null), buildDataFrameAnalyticsStats(dfa2.getId(), DataFrameAnalyticsState.STOPPED, 100L), buildDataFrameAnalyticsStats(dfa3.getId(), DataFrameAnalyticsState.STARTED, 200L)));
    // Tally configs per analysis name (replaces the original computeIfAbsent/put dance with Map.merge).
    Map<String, Integer> expectedDfaCountByAnalysis = new HashMap<>();
    dataFrameAnalytics.forEach(dfa -> expectedDfaCountByAnalysis.merge(dfa.getAnalysis().getWriteableName(), 1, Integer::sum));
    // Trained models: two classic DFA-derived models, one NER model, one prepackaged text-expansion model.
    TrainedModelConfig trainedModel1 = TrainedModelConfigTests.createTestInstance("model_1").setModelSize(100).setEstimatedOperations(200).setMetadata(Collections.singletonMap("analytics_config", "anything")).setInferenceConfig(ClassificationConfig.EMPTY_PARAMS).build();
    TrainedModelConfig trainedModel2 = TrainedModelConfigTests.createTestInstance("model_2").setModelSize(200).setEstimatedOperations(400).setMetadata(Collections.singletonMap("analytics_config", "anything")).setInferenceConfig(RegressionConfig.EMPTY_PARAMS).build();
    TrainedModelConfig trainedModel3 = TrainedModelConfigTests.createTestInstance("model_3").setModelSize(300).setEstimatedOperations(600).setInferenceConfig(new NerConfig(null, null, null, null)).build();
    TrainedModelConfig trainedModel4 = TrainedModelConfigTests.createTestInstance("model_4").setTags(Collections.singletonList("prepackaged")).setModelSize(1000).setEstimatedOperations(2000).setInferenceConfig(new TextExpansionConfig(null, null, null)).build();
    givenTrainedModels(Arrays.asList(trainedModel1, trainedModel2, trainedModel3, trainedModel4));
    // Per-model stats: ingest pipeline usage for all four, deployment/assignment stats for models 3 and 4.
    givenTrainedModelStats(new GetTrainedModelsStatsAction.Response(new QueryPage<>(List.of(new GetTrainedModelsStatsAction.Response.TrainedModelStats(trainedModel1.getModelId(), new TrainedModelSizeStats(trainedModel1.getModelSize(), 0L), new IngestStats(new IngestStats.Stats(0, 0, 0, 0), List.of(), Map.of("pipeline_1", List.of(new IngestStats.ProcessorStat(InferenceProcessor.TYPE, InferenceProcessor.TYPE, new IngestStats.Stats(10, 1, 1000, 100)), new IngestStats.ProcessorStat(InferenceProcessor.TYPE, InferenceProcessor.TYPE, new IngestStats.Stats(20, 2, 2000, 200)), new IngestStats.ProcessorStat("grok", "grok", new IngestStats.Stats(100, 100, 100, 100))))), 1, null, null), new GetTrainedModelsStatsAction.Response.TrainedModelStats(trainedModel2.getModelId(), new TrainedModelSizeStats(trainedModel2.getModelSize(), 0L), new IngestStats(new IngestStats.Stats(0, 0, 0, 0), List.of(), Map.of("pipeline_1", List.of(new IngestStats.ProcessorStat(InferenceProcessor.TYPE, InferenceProcessor.TYPE, new IngestStats.Stats(30, 3, 3000, 300))))), 2, null, null), new GetTrainedModelsStatsAction.Response.TrainedModelStats(trainedModel3.getModelId(), new TrainedModelSizeStats(trainedModel3.getModelSize(), 0L), new IngestStats(new IngestStats.Stats(0, 0, 0, 0), List.of(), Map.of("pipeline_2", List.of(new IngestStats.ProcessorStat(InferenceProcessor.TYPE, InferenceProcessor.TYPE, new IngestStats.Stats(40, 4, 4000, 400))))), 3, null, new AssignmentStats("deployment_3", "model_3", null, null, null, null, Instant.now(), List.of(AssignmentStats.NodeStats.forStartedState(DiscoveryNodeUtils.create("foo", new TransportAddress(TransportAddress.META_ADDRESS, 2)), 3, 41.0, 41.0, 0, 1, 3L, 2, 3, lastAccess(3), Instant.now(), randomIntBetween(1, 16), randomIntBetween(1, 16), 1L, 2L, 33.0, 1L)), Priority.NORMAL).setState(AssignmentState.STOPPING)), new GetTrainedModelsStatsAction.Response.TrainedModelStats(trainedModel4.getModelId(), new TrainedModelSizeStats(trainedModel4.getModelSize(), 0L), new 
IngestStats(new IngestStats.Stats(0, 0, 0, 0), List.of(), Map.of("pipeline_3", List.of(new IngestStats.ProcessorStat(InferenceProcessor.TYPE, InferenceProcessor.TYPE, new IngestStats.Stats(50, 5, 5000, 500))))), 4, null, new AssignmentStats("deployment_4", "model_4", 2, 2, 1000, ByteSizeValue.ofBytes(1000), Instant.now(), List.of(AssignmentStats.NodeStats.forStartedState(DiscoveryNodeUtils.create("foo", new TransportAddress(TransportAddress.META_ADDRESS, 2)), 5, 41.0, 41.0, 0, 1, 3L, 2, 3, lastAccess(4), Instant.now(), randomIntBetween(1, 16), randomIntBetween(1, 16), 1L, 2L, 33.0, 1L), AssignmentStats.NodeStats.forStartedState(DiscoveryNodeUtils.create("bar", new TransportAddress(TransportAddress.META_ADDRESS, 3)), 4, 50.0, 50.0, 0, 1, 1L, 2, 3, lastAccess(44), Instant.now(), randomIntBetween(1, 16), randomIntBetween(1, 16), 2L, 4L, 34.0, 1L)), Priority.NORMAL).setState(AssignmentState.STARTED).setAllocationStatus(new AllocationStatus(2, 2)))), 0, GetTrainedModelsStatsAction.Response.RESULTS_FIELD)));
    return expectedDfaCountByAnalysis;
}
215795.9318184elasticsearch
/**
 * Randomizes a full security configuration (enabled flag, per-surface SSL, token and API-key
 * services, auditing, IP filtering, role and role-mapping stores, realms, anonymous access,
 * FIPS mode, operator privileges, user profiles and cross-cluster API keys), runs the usage
 * action against it, and asserts that both the returned usage object and its
 * stream-serialization round trip expose exactly the configured values.
 */
public void testUsage() throws Exception {
    // Whether security as a whole is explicitly switched off; "enabled" selects between the
    // two big assertion branches at the bottom.
    final boolean explicitlyDisabled = randomBoolean();
    final boolean enabled = explicitlyDisabled == false;
    // Licensed-feature availability is stubbed directly on the mocked license state.
    final boolean operatorPrivilegesAvailable = randomBoolean();
    when(licenseState.isAllowed(Security.OPERATOR_PRIVILEGES_FEATURE)).thenReturn(operatorPrivilegesAvailable);
    final boolean remoteClusterServerAvailable = randomBoolean();
    when(licenseState.isAllowed(Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE)).thenReturn(remoteClusterServerAvailable);
    Settings.Builder settings = Settings.builder().put(this.settings);
    if (explicitlyDisabled) {
        settings.put("xpack.security.enabled", "false");
    }
    // Per-surface SSL flags: HTTP, transport, remote-cluster server and remote-cluster client.
    final boolean httpSSLEnabled = randomBoolean();
    settings.put("xpack.security.http.ssl.enabled", httpSSLEnabled);
    final boolean transportSSLEnabled = randomBoolean();
    settings.put("xpack.security.transport.ssl.enabled", transportSSLEnabled);
    // The remote cluster server is only ever enabled when security itself is enabled.
    final boolean remoteClusterServerEnabled = explicitlyDisabled ? false : randomBoolean();
    settings.put("remote_cluster_server.enabled", remoteClusterServerEnabled);
    final boolean remoteClusterServerSslEnabled = randomBoolean();
    settings.put("xpack.security.remote_cluster_server.ssl.enabled", remoteClusterServerSslEnabled);
    final boolean remoteClusterClientSslEnabled = randomBoolean();
    settings.put("xpack.security.remote_cluster_client.ssl.enabled", remoteClusterClientSslEnabled);
    // Token service: when its flag is not configured explicitly, the expected value defaults
    // to the HTTP SSL setting.
    boolean configureEnabledFlagForTokenService = randomBoolean();
    final boolean tokenServiceEnabled;
    if (configureEnabledFlagForTokenService) {
        tokenServiceEnabled = randomBoolean();
        settings.put("xpack.security.authc.token.enabled", tokenServiceEnabled);
    } else {
        tokenServiceEnabled = httpSSLEnabled;
    }
    // API key service: expected to default to enabled when not configured explicitly.
    boolean configureEnabledFlagForApiKeyService = randomBoolean();
    final boolean apiKeyServiceEnabled;
    if (configureEnabledFlagForApiKeyService) {
        apiKeyServiceEnabled = randomBoolean();
        settings.put("xpack.security.authc.api_key.enabled", apiKeyServiceEnabled);
    } else {
        apiKeyServiceEnabled = true;
    }
    final boolean auditingEnabled = randomBoolean();
    settings.put(XPackSettings.AUDIT_ENABLED.getKey(), auditingEnabled);
    // IP-filter usage comes from the mocked filter, not from settings.
    final boolean httpIpFilterEnabled = randomBoolean();
    final boolean transportIPFilterEnabled = randomBoolean();
    when(ipFilter.usageStats()).thenReturn(Map.of("http", Map.of("enabled", httpIpFilterEnabled), "transport", Map.of("enabled", transportIPFilterEnabled)));
    final boolean rolesStoreEnabled = randomBoolean();
    configureRoleStoreUsage(rolesStoreEnabled);
    final boolean roleMappingStoreEnabled = randomBoolean();
    configureRoleMappingStoreUsage(roleMappingStoreEnabled);
    // Five synthetic realm types ("type0".."type4"), each with one-element list values under
    // key1..key3, so the per-realm rendering can be asserted exactly.
    Map<String, Object> realmsUsageStats = new HashMap<>();
    for (int i = 0; i < 5; i++) {
        Map<String, Object> realmUsage = new HashMap<>();
        realmsUsageStats.put("type" + i, realmUsage);
        realmUsage.put("key1", Arrays.asList("value" + i));
        realmUsage.put("key2", Arrays.asList(i));
        realmUsage.put("key3", Arrays.asList(i % 2 == 0));
    }
    configureRealmsUsage(realmsUsageStats);
    // Anonymous access is implied by configuring at least one anonymous role.
    final boolean anonymousEnabled = randomBoolean();
    if (anonymousEnabled) {
        settings.put(AnonymousUser.ROLES_SETTING.getKey(), "foo");
    }
    final boolean fips140Enabled = randomBoolean();
    if (fips140Enabled) {
        settings.put("xpack.security.fips_mode.enabled", true);
    }
    final boolean operatorPrivilegesEnabled = randomBoolean();
    if (operatorPrivilegesEnabled) {
        settings.put("xpack.security.operator_privileges.enabled", true);
    }
    // Stub the async profile-usage callback to answer immediately with fixed numbers.
    final Map<String, Object> userProfileUsage = Map.of("total", randomIntBetween(100, 200), "enabled", randomIntBetween(50, 99), "recent", randomIntBetween(1, 42));
    doAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        final var listener = (ActionListener<Map<String, Object>>) invocation.getArguments()[0];
        listener.onResponse(userProfileUsage);
        return null;
    }).when(profileService).usageStats(anyActionListener());
    // Cross-cluster API key stats: the stub only reports them while the API key service is on.
    final int ccsKeys = randomIntBetween(0, 50);
    final int ccrKeys = randomIntBetween(0, 50);
    final int ccsCcrKeys = randomIntBetween(0, 50);
    final Map<String, Object> crossClusterApiKeyUsage = Map.of("total", ccsKeys + ccrKeys + ccsCcrKeys, "ccs", ccsKeys, "ccr", ccrKeys, "ccs_ccr", ccsCcrKeys);
    doAnswer(invocation -> {
        final ActionListener<Map<String, Object>> listener = invocation.getArgument(0);
        listener.onResponse(apiKeyServiceEnabled ? crossClusterApiKeyUsage : Map.of());
        return null;
    }).when(apiKeyService).crossClusterApiKeyUsageStats(anyActionListener());
    // Run the usage action and block for its response.
    var usageAction = newUsageAction(settings.build());
    PlainActionFuture<XPackUsageFeatureResponse> future = new PlainActionFuture<>();
    usageAction.masterOperation(null, null, null, future);
    SecurityFeatureSetUsage securityUsage = (SecurityFeatureSetUsage) future.get().getUsage();
    // Round-trip through stream serialization; the copy must pass the same assertions.
    BytesStreamOutput out = new BytesStreamOutput();
    securityUsage.writeTo(out);
    XPackFeatureSet.Usage serializedUsage = new SecurityFeatureSetUsage(out.bytes().streamInput());
    for (XPackFeatureSet.Usage usage : Arrays.asList(securityUsage, serializedUsage)) {
        assertThat(usage, is(notNullValue()));
        assertThat(usage.name(), is(XPackField.SECURITY));
        assertThat(usage.enabled(), is(enabled));
        assertThat(usage.available(), is(true));
        XContentSource source = getXContentSource(usage);
        if (enabled) {
            // Each synthetic realm type must surface its key1..key3 lists verbatim.
            for (int i = 0; i < 5; i++) {
                assertThat(source.getValue("realms.type" + i + ".key1"), contains("value" + i));
                assertThat(source.getValue("realms.type" + i + ".key2"), contains(i));
                assertThat(source.getValue("realms.type" + i + ".key3"), contains(i % 2 == 0));
            }
            assertThat(source.getValue("ssl.http.enabled"), is(httpSSLEnabled));
            assertThat(source.getValue("ssl.transport.enabled"), is(transportSSLEnabled));
            assertThat(source.getValue("token_service.enabled"), is(tokenServiceEnabled));
            assertThat(source.getValue("api_key_service.enabled"), is(apiKeyServiceEnabled));
            // The audit output list is only rendered while auditing is on.
            assertThat(source.getValue("audit.enabled"), is(auditingEnabled));
            if (auditingEnabled) {
                assertThat(source.getValue("audit.outputs"), contains(LoggingAuditTrail.NAME));
            } else {
                assertThat(source.getValue("audit.outputs"), is(nullValue()));
            }
            assertThat(source.getValue("ipfilter.http.enabled"), is(httpIpFilterEnabled));
            assertThat(source.getValue("ipfilter.transport.enabled"), is(transportIPFilterEnabled));
            // Role and role-mapping stores report counts when enabled, empty maps otherwise
            // (the 1/12/10 values come from the configure*Usage helpers).
            if (rolesStoreEnabled) {
                assertThat(source.getValue("roles.count"), is(1));
            } else {
                assertThat(((Map) source.getValue("roles")).isEmpty(), is(true));
            }
            if (roleMappingStoreEnabled) {
                assertThat(source.getValue("role_mapping.native.size"), is(12));
                assertThat(source.getValue("role_mapping.native.enabled"), is(10));
            } else {
                final Map<String, Object> roleMapping = source.getValue("role_mapping.native");
                assertThat(roleMapping.entrySet(), emptyIterable());
            }
            assertThat(source.getValue("anonymous.enabled"), is(anonymousEnabled));
            assertThat(source.getValue("fips_140.enabled"), is(fips140Enabled));
            assertThat(source.getValue("operator_privileges.available"), is(operatorPrivilegesAvailable));
            assertThat(source.getValue("operator_privileges.enabled"), is(operatorPrivilegesEnabled));
            assertThat(source.getValue("user_profile.total"), equalTo(userProfileUsage.get("total")));
            assertThat(source.getValue("user_profile.enabled"), equalTo(userProfileUsage.get("enabled")));
            assertThat(source.getValue("user_profile.recent"), equalTo(userProfileUsage.get("recent")));
            assertThat(source.getValue("ssl.http.enabled"), is(httpSSLEnabled));
            assertThat(source.getValue("ssl.transport.enabled"), is(transportSSLEnabled));
            // Remote-cluster-server SSL usage is omitted entirely when the server is disabled.
            if (remoteClusterServerEnabled) {
                assertThat(source.getValue("ssl.remote_cluster_server.enabled"), is(remoteClusterServerSslEnabled));
            } else {
                assertThat(source.getValue("ssl.remote_cluster_server.enabled"), nullValue());
            }
            assertThat(source.getValue("ssl.remote_cluster_client.enabled"), is(remoteClusterClientSslEnabled));
            assertThat(source.getValue("remote_cluster_server.available"), is(remoteClusterServerAvailable));
            assertThat(source.getValue("remote_cluster_server.enabled"), is(remoteClusterServerEnabled));
            // The API key breakdown mirrors the stubbed stats, or is an empty map when the
            // API key service is disabled.
            if (apiKeyServiceEnabled) {
                assertThat(source.getValue("remote_cluster_server.api_keys.total"), equalTo(crossClusterApiKeyUsage.get("total")));
                assertThat(source.getValue("remote_cluster_server.api_keys.ccs"), equalTo(ccsKeys));
                assertThat(source.getValue("remote_cluster_server.api_keys.ccr"), equalTo(ccrKeys));
                assertThat(source.getValue("remote_cluster_server.api_keys.ccs_ccr"), equalTo(ccsCcrKeys));
            } else {
                assertThat(source.getValue("remote_cluster_server.api_keys"), anEmptyMap());
            }
        } else {
            // With security disabled, none of the per-feature sections are rendered at all.
            assertThat(source.getValue("ssl"), is(nullValue()));
            assertThat(source.getValue("realms"), is(nullValue()));
            assertThat(source.getValue("token_service"), is(nullValue()));
            assertThat(source.getValue("api_key_service"), is(nullValue()));
            assertThat(source.getValue("audit"), is(nullValue()));
            assertThat(source.getValue("anonymous"), is(nullValue()));
            assertThat(source.getValue("ipfilter"), is(nullValue()));
            assertThat(source.getValue("roles"), is(nullValue()));
            assertThat(source.getValue("operator_privileges"), is(nullValue()));
            assertThat(source.getValue("user_profile"), is(nullValue()));
            assertThat(source.getValue("remote_cluster_server"), is(nullValue()));
        }
    }
}
212176.347176gwt
/**
 * Asserts element-wise equality of two arrays of the same runtime kind.
 *
 * <p>Handles {@code Object[]} plus every primitive array type. When both arguments are
 * arrays of a matching kind, a length mismatch is reported through
 * {@code failArrayLengthsNotEqual} and each element mismatch through
 * {@code failArrayValuesAtIndexNotEqual}; any other combination (including mismatched
 * array kinds) falls back to {@code failNotEquals}, just like two plain non-equal values.
 */
private static void assertArrayEquals(Object actual, Object expected, String message) {
    if (actual.getClass().isArray()) {
        if (actual instanceof Object[] && expected instanceof Object[]) {
            Object[] got = (Object[]) actual;
            Object[] want = (Object[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof int[] && expected instanceof int[]) {
            int[] got = (int[]) actual;
            int[] want = (int[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof float[] && expected instanceof float[]) {
            float[] got = (float[]) actual;
            float[] want = (float[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof long[] && expected instanceof long[]) {
            long[] got = (long[]) actual;
            long[] want = (long[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof double[] && expected instanceof double[]) {
            double[] got = (double[]) actual;
            double[] want = (double[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof boolean[] && expected instanceof boolean[]) {
            boolean[] got = (boolean[]) actual;
            boolean[] want = (boolean[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof byte[] && expected instanceof byte[]) {
            byte[] got = (byte[]) actual;
            byte[] want = (byte[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof short[] && expected instanceof short[]) {
            short[] got = (short[]) actual;
            short[] want = (short[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        } else if (actual instanceof char[] && expected instanceof char[]) {
            char[] got = (char[]) actual;
            char[] want = (char[]) expected;
            if (want.length != got.length) {
                failArrayLengthsNotEqual(got.length, want.length, message);
            } else {
                for (int idx = 0; idx < want.length; idx++) {
                    try {
                        assertEquals(got[idx], want[idx]);
                    } catch (AssertionError err) {
                        failArrayValuesAtIndexNotEqual(got[idx], want[idx], idx, message);
                    }
                }
                return;
            }
        }
    }
    failNotEquals(actual, expected, message);
}
216314.812234hadoop
/**
 * Exercises the node CLI's {@code -list} command end to end against mocked node reports:
 * filtering by a single state, the default listing, {@code -showDetails}, a multi-state
 * filter, {@code -All}, and rejection of an invalid state name. Each scenario compares the
 * CLI's captured stdout byte-for-byte against an expected report rendered with a
 * PrintWriter, and verifies the cumulative number of writes to the mocked sysOut.
 */
public void testListClusterNodes() throws Exception {
    // Seed one report per state, plus a second RUNNING node - seven reports in total.
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    nodeReports.addAll(getNodeReports(1, NodeState.NEW));
    nodeReports.addAll(getNodeReports(2, NodeState.RUNNING));
    nodeReports.addAll(getNodeReports(1, NodeState.UNHEALTHY));
    nodeReports.addAll(getNodeReports(1, NodeState.DECOMMISSIONED));
    nodeReports.addAll(getNodeReports(1, NodeState.REBOOTED));
    nodeReports.addAll(getNodeReports(1, NodeState.LOST));
    NodeCLI cli = createAndGetNodeCLI();
    // Scenario 1: "-states NEW" lists only the single NEW node.
    Set<NodeState> nodeStates = new HashSet<NodeState>();
    nodeStates.add(NodeState.NEW);
    NodeState[] states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    int result = cli.run(new String[] { "-list", "-states", "NEW" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    // Expected output is rebuilt with the exact column padding the CLI emits.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t            NEW\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    String nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    // Write counts on sysOut are cumulative across scenarios (1, 2, 3, ... below).
    verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 2: "-states RUNNING" lists both RUNNING nodes.
    nodeStates.clear();
    nodeStates.add(NodeState.RUNNING);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "RUNNING" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:2");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 3: plain "-list" (no -states) must produce the same output as the
    // RUNNING-only listing above.
    result = cli.run(new String[] { "-list" });
    assertEquals(0, result);
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 4: "-list -showDetails" appends a detailed information section per node.
    result = cli.run(new String[] { "-list", "-showDetails" });
    assertEquals(0, result);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:2");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.println("Detailed Node Information :");
    pw.println("\tConfigured Resources : <memory:0, vCores:0>");
    pw.println("\tAllocated Resources : <memory:0, vCores:0>");
    pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
    pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
    pw.println("\tNode-Labels : ");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.println("Detailed Node Information :");
    pw.println("\tConfigured Resources : <memory:0, vCores:0>");
    pw.println("\tAllocated Resources : <memory:0, vCores:0>");
    pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
    pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
    pw.println("\tNode-Labels : ");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 5: "-states UNHEALTHY".
    nodeStates.clear();
    nodeStates.add(NodeState.UNHEALTHY);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "UNHEALTHY" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t      UNHEALTHY\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 6: "-states DECOMMISSIONED".
    nodeStates.clear();
    nodeStates.add(NodeState.DECOMMISSIONED);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "DECOMMISSIONED" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t DECOMMISSIONED\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 7: "-states REBOOTED".
    nodeStates.clear();
    nodeStates.add(NodeState.REBOOTED);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "REBOOTED" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(7)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 8: "-states LOST".
    nodeStates.clear();
    nodeStates.add(NodeState.LOST);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "LOST" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t           LOST\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(8)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 9: comma-separated multi-state filter lists all matching nodes (5 of 7).
    nodeStates.clear();
    nodeStates.add(NodeState.NEW);
    nodeStates.add(NodeState.RUNNING);
    nodeStates.add(NodeState.LOST);
    nodeStates.add(NodeState.REBOOTED);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "NEW,RUNNING,LOST,REBOOTED" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:5");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t            NEW\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t           LOST\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(9)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 10: "-All" requests every state and therefore every seeded node.
    nodeStates.clear();
    for (NodeState s : NodeState.values()) {
        nodeStates.add(s);
    }
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-All" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:7");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t            NEW\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t      UNHEALTHY\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t DECOMMISSIONED\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t           LOST\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(10)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // Scenario 11: an unparseable state name makes the CLI fail with exit code -1.
    result = cli.run(new String[] { "-list", "-states", "InvalidState" });
    assertEquals(-1, result);
}
217119.0611201hadoop
/**
 * End-to-end write/read test for the HBase timeline storage: builds a single
 * {@code TimelineEntity} carrying every payload kind the writer persists
 * (info map, is-related-to, relates-to, configs, a time-series metric),
 * writes it through {@code HBaseTimelineWriterImpl}, then verifies both the
 * raw entity-table cells and the reader-facing views of the same data.
 *
 * @throws Exception on any HBase or timeline-service failure
 */
public void testWriteEntityToHBase() throws Exception {
    TimelineEntities te = new TimelineEntities();
    TimelineEntity entity = new TimelineEntity();
    String id = "hello";
    String type = "world";
    entity.setId(id);
    entity.setType(type);
    Long cTime = 1425016501000L;
    entity.setCreatedTime(cTime);
    // Info map with mixed value types (String and Integer).
    Map<String, Object> infoMap = new HashMap<String, Object>();
    infoMap.put("infoMapKey1", "infoMapValue1");
    infoMap.put("infoMapKey2", 10);
    entity.addInfo(infoMap);
    // One "is related to" relationship with a single target id.
    String key = "task";
    String value = "is_related_to_entity_id_here";
    Set<String> isRelatedToSet = new HashSet<String>();
    isRelatedToSet.add(value);
    Map<String, Set<String>> isRelatedTo = new HashMap<String, Set<String>>();
    isRelatedTo.put(key, isRelatedToSet);
    entity.setIsRelatedToEntities(isRelatedTo);
    // One "relates to" relationship with two target ids (key/value locals
    // are deliberately reused from the block above).
    key = "container";
    value = "relates_to_entity_id_here";
    Set<String> relatesToSet = new HashSet<String>();
    relatesToSet.add(value);
    value = "relates_to_entity_id_here_Second";
    relatesToSet.add(value);
    Map<String, Set<String>> relatesTo = new HashMap<String, Set<String>>();
    relatesTo.put(key, relatesToSet);
    entity.setRelatesToEntities(relatesTo);
    // Two configuration parameters.
    Map<String, String> conf = new HashMap<String, String>();
    conf.put("config_param1", "value1");
    conf.put("config_param2", "value2");
    entity.addConfigs(conf);
    // Time-series metric with six data points over the last two minutes;
    // the two newest values are longs so the Number types are mixed.
    Set<TimelineMetric> metrics = new HashSet<>();
    TimelineMetric m1 = new TimelineMetric();
    m1.setId("MAP_SLOT_MILLIS");
    Map<Long, Number> metricValues = new HashMap<Long, Number>();
    long ts = System.currentTimeMillis();
    metricValues.put(ts - 120000, 100000000);
    metricValues.put(ts - 100000, 200000000);
    metricValues.put(ts - 80000, 300000000);
    metricValues.put(ts - 60000, 400000000);
    metricValues.put(ts - 40000, 50000000000L);
    metricValues.put(ts - 20000, 60000000000L);
    m1.setType(Type.TIME_SERIES);
    m1.setValues(metricValues);
    metrics.add(m1);
    entity.addMetrics(metrics);
    // Wrap as a sub-application entity; the sub-application table contents
    // are checked at the end via verifySubApplicationTableEntities().
    te.addEntity(new SubApplicationEntity(entity));
    HBaseTimelineWriterImpl hbi = null;
    try {
        Configuration c1 = util.getConfiguration();
        hbi = new HBaseTimelineWriterImpl();
        hbi.init(c1);
        hbi.start();
        String cluster = "cluster_test_write_entity";
        String user = "user1";
        String subAppUser = "subAppUser1";
        String flow = "some_flow_name";
        String flowVersion = "AB7822C10F1111";
        long runid = 1002345678919L;
        // App-id timestamp is pushed ~2.5h into the future — presumably to
        // keep the generated row key distinct from other tests; TODO confirm.
        String appName = HBaseTimelineSchemaUtils.convertApplicationIdToString(ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1));
        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion, runid, appName), te, UserGroupInformation.createRemoteUser(subAppUser));
        // stop() before scanning so buffered writes are flushed to HBase.
        hbi.stop();
        // Scan the entity table directly from this entity's row-key prefix,
        // keeping all cell versions (needed for the timestamped metric cells).
        Scan s = new Scan();
        byte[] startRow = new EntityRowKeyPrefix(cluster, user, flow, runid, appName).getRowKeyPrefix();
        s.setStartRow(startRow);
        s.setMaxVersions(Integer.MAX_VALUE);
        Connection conn = ConnectionFactory.createConnection(c1);
        ResultScanner scanner = new EntityTableRW().getResultScanner(c1, conn, s);
        int rowCount = 0;
        int colCount = 0;
        KeyConverter<String> stringKeyConverter = new StringKeyConverter();
        for (Result result : scanner) {
            if (result != null && !result.isEmpty()) {
                rowCount++;
                colCount += result.size();
                byte[] row1 = result.getRow();
                assertTrue(isRowKeyCorrect(row1, cluster, user, flow, runid, appName, entity));
                // Scalar columns round-trip: id, type, created time.
                String id1 = ColumnRWHelper.readResult(result, EntityColumn.ID).toString();
                assertEquals(id, id1);
                String type1 = ColumnRWHelper.readResult(result, EntityColumn.TYPE).toString();
                assertEquals(type, type1);
                Long cTime1 = (Long) ColumnRWHelper.readResult(result, EntityColumn.CREATED_TIME);
                assertEquals(cTime1, cTime);
                // Info columns round-trip as a map.
                Map<String, Object> infoColumns = ColumnRWHelper.readResults(result, EntityColumnPrefix.INFO, new StringKeyConverter());
                assertEquals(infoMap, infoColumns);
                // Relationship values are stored as one separator-encoded
                // compound string per key; split and compare element-wise.
                for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo.entrySet()) {
                    Object isRelatedToValue = ColumnRWHelper.readResult(result, EntityColumnPrefix.IS_RELATED_TO, isRelatedToEntry.getKey());
                    String compoundValue = isRelatedToValue.toString();
                    Set<String> isRelatedToValues = new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
                    assertEquals(isRelatedTo.get(isRelatedToEntry.getKey()).size(), isRelatedToValues.size());
                    for (String v : isRelatedToEntry.getValue()) {
                        assertTrue(isRelatedToValues.contains(v));
                    }
                }
                for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo.entrySet()) {
                    String compoundValue = ColumnRWHelper.readResult(result, EntityColumnPrefix.RELATES_TO, relatesToEntry.getKey()).toString();
                    Set<String> relatesToValues = new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
                    assertEquals(relatesTo.get(relatesToEntry.getKey()).size(), relatesToValues.size());
                    for (String v : relatesToEntry.getValue()) {
                        assertTrue(relatesToValues.contains(v));
                    }
                }
                // Config columns round-trip as a map.
                Map<String, Object> configColumns = ColumnRWHelper.readResults(result, EntityColumnPrefix.CONFIG, stringKeyConverter);
                assertEquals(conf, configColumns);
                // Metric values come back as a timestamp->value map per metric id.
                NavigableMap<String, NavigableMap<Long, Number>> metricsResult = ColumnRWHelper.readResultsWithTimestamps(result, EntityColumnPrefix.METRIC, stringKeyConverter);
                NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
                matchMetrics(metricValues, metricMap);
            }
        }
        // Exactly one entity row; 16 cells total for the payload written
        // above (scalars + info + relationships + configs + 6 metric cells;
        // exact breakdown depends on storage-added columns — see FROM_ID
        // handling below).
        assertEquals(1, rowCount);
        assertEquals(16, colCount);
        // Reader view 1: ALL fields with metricsLimit = Integer.MAX_VALUE,
        // so the full time series is expected back.
        TimelineEntity e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow, runid, appName, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), Integer.MAX_VALUE, null, null));
        Set<TimelineEntity> es1 = reader.getEntities(new TimelineReaderContext(cluster, user, flow, runid, appName, entity.getType(), null), new TimelineEntityFilters.Builder().build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), Integer.MAX_VALUE, null, null));
        assertNotNull(e1);
        assertEquals(1, es1.size());
        assertEquals(id, e1.getId());
        assertEquals(type, e1.getType());
        assertEquals(cTime, e1.getCreatedTime());
        Map<String, Object> infoMap2 = e1.getInfo();
        // FROM_ID is injected by the storage layer, not part of the test
        // entity, so drop it before comparing info maps.
        infoMap2.remove("FROM_ID");
        assertEquals(infoMap, infoMap2);
        Map<String, Set<String>> isRelatedTo2 = e1.getIsRelatedToEntities();
        assertEquals(isRelatedTo, isRelatedTo2);
        Map<String, Set<String>> relatesTo2 = e1.getRelatesToEntities();
        assertEquals(relatesTo, relatesTo2);
        Map<String, String> conf2 = e1.getConfigs();
        assertEquals(conf, conf2);
        Set<TimelineMetric> metrics2 = e1.getMetrics();
        assertEquals(metrics, metrics2);
        for (TimelineMetric metric2 : metrics2) {
            Map<Long, Number> metricValues2 = metric2.getValues();
            matchMetrics(metricValues, metricValues2);
        }
        // Reader view 2: same entity with a null metrics limit — asserted
        // below to collapse each metric to its single latest value
        // (SINGLE_VALUE at the newest timestamp, ts - 20000).
        e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow, runid, appName, entity.getType(), entity.getId()), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
        assertNotNull(e1);
        assertEquals(id, e1.getId());
        assertEquals(type, e1.getType());
        assertEquals(cTime, e1.getCreatedTime());
        infoMap2 = e1.getInfo();
        infoMap2.remove("FROM_ID");
        assertEquals(infoMap, infoMap2);
        assertEquals(isRelatedTo, e1.getIsRelatedToEntities());
        assertEquals(relatesTo, e1.getRelatesToEntities());
        assertEquals(conf, e1.getConfigs());
        for (TimelineMetric metric : e1.getMetrics()) {
            assertEquals(TimelineMetric.Type.SINGLE_VALUE, metric.getType());
            assertEquals(1, metric.getValues().size());
            assertTrue(metric.getValues().containsKey(ts - 20000));
            assertEquals(metricValues.get(ts - 20000), metric.getValues().get(ts - 20000));
        }
        // Finally check the same entity landed in the sub-application table
        // under subAppUser.
        verifySubApplicationTableEntities(cluster, user, flow, flowVersion, runid, appName, subAppUser, c1, entity, id, type, infoMap, isRelatedTo, relatesTo, conf, metricValues, metrics, cTime, m1);
    } finally {
        // Always release the writer; stop() may run a second time here after
        // the in-try stop() above.
        if (hbi != null) {
            hbi.stop();
            hbi.close();
        }
    }
}
214174.5220198kafka
/**
 * Emits the generated {@code write(Writable, ObjectSerializationCache, short)}
 * method for the message/struct described by {@code struct}. Untagged fields
 * are written inline in declaration order; tagged fields are only counted in
 * the first pass and then serialized in ascending tag order in the trailing
 * tagged-field section (flexible versions only).
 *
 * Note: every printf here produces generated source, so statement order in
 * this method directly determines the bytes of the emitted code.
 *
 * @param className      name of the class being generated (used in messages)
 * @param struct         the struct specification to serialize
 * @param parentVersions versions supported by the enclosing context
 */
private void generateClassWriter(String className, StructSpec struct, Versions parentVersions) {
    headerGenerator.addImport(MessageGenerator.WRITABLE_CLASS);
    headerGenerator.addImport(MessageGenerator.OBJECT_SERIALIZATION_CACHE_CLASS);
    buffer.printf("@Override%n");
    buffer.printf("public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {%n");
    buffer.incrementIndent();
    // Guard clause in the generated code: versions outside the struct's
    // supported range throw UnsupportedVersionException at runtime.
    VersionConditional.forVersions(struct.versions(), parentVersions).allowMembershipCheckAlwaysFalse(false).ifNotMember(__ -> {
        headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS);
        buffer.printf("throw new UnsupportedVersionException(\"Can't write " + "version \" + _version + \" of %s\");%n", className);
    }).generate(buffer);
    buffer.printf("int _numTaggedFields = 0;%n");
    Versions curVersions = parentVersions.intersect(struct.versions());
    // Tagged fields collected during the first pass, keyed (and later
    // emitted) in ascending tag order.
    TreeMap<Integer, FieldSpec> taggedFields = new TreeMap<>();
    for (FieldSpec field : struct.fields()) {
        VersionConditional cond = VersionConditional.forVersions(field.versions(), curVersions).ifMember(presentVersions -> {
            // Untagged in these versions: write the field inline.
            VersionConditional.forVersions(field.taggedVersions(), presentVersions).ifNotMember(presentAndUntaggedVersions -> {
                if (field.type().isVariableLength() && !field.type().isStruct()) {
                    ClauseGenerator callGenerateVariableLengthWriter = versions -> {
                        generateVariableLengthWriter(fieldFlexibleVersions(field), field.camelCaseName(), field.type(), versions, field.nullableVersions(), field.zeroCopy());
                    };
                    // Arrays whose element serialization differs between
                    // flexible and non-flexible versions need the writer
                    // generated separately for each branch.
                    if (field.type().isArray() && ((FieldType.ArrayType) field.type()).elementType().serializationIsDifferentInFlexibleVersions()) {
                        VersionConditional.forVersions(fieldFlexibleVersions(field), presentAndUntaggedVersions).ifMember(callGenerateVariableLengthWriter).ifNotMember(callGenerateVariableLengthWriter).generate(buffer);
                    } else {
                        callGenerateVariableLengthWriter.generate(presentAndUntaggedVersions);
                    }
                } else if (field.type().isStruct()) {
                    // Nullable structs are prefixed with a marker byte in the
                    // generated code: -1 for null, 1 for present.
                    IsNullConditional.forName(field.camelCaseName()).possibleVersions(presentAndUntaggedVersions).nullableVersions(field.nullableVersions()).ifNull(() -> {
                        VersionConditional.forVersions(field.nullableVersions(), presentAndUntaggedVersions).ifMember(__ -> {
                            buffer.printf("_writable.writeByte((byte) -1);%n");
                        }).ifNotMember(__ -> {
                            // Null struct in a non-nullable version is a
                            // programming error in the generated code's caller.
                            buffer.printf("throw new NullPointerException();%n");
                        }).generate(buffer);
                    }).ifShouldNotBeNull(() -> {
                        VersionConditional.forVersions(field.nullableVersions(), presentAndUntaggedVersions).ifMember(__ -> {
                            buffer.printf("_writable.writeByte((byte) 1);%n");
                        }).generate(buffer);
                        buffer.printf("%s;%n", primitiveWriteExpression(field.type(), field.camelCaseName()));
                    }).generate(buffer);
                } else {
                    // Fixed-length primitive: write directly.
                    buffer.printf("%s;%n", primitiveWriteExpression(field.type(), field.camelCaseName()));
                }
            }).ifMember(__ -> {
                // Tagged in these versions: only count it here (a tagged field
                // is serialized later, and only when it differs from its
                // default). generateNonDefaultValueCheck opens a brace that is
                // closed by the printf("}%n") below.
                field.generateNonDefaultValueCheck(headerGenerator, structRegistry, buffer, "this.", field.nullableVersions());
                buffer.incrementIndent();
                buffer.printf("_numTaggedFields++;%n");
                buffer.decrementIndent();
                buffer.printf("}%n");
                // Each tag may be used by at most one field.
                if (taggedFields.put(field.tag().get(), field) != null) {
                    throw new RuntimeException("Field " + field.name() + " has tag " + field.tag() + ", but another field already used that tag.");
                }
            }).generate(buffer);
        });
        // Non-ignorable fields must not carry a non-default value when
        // serializing a version that cannot represent them.
        if (!field.ignorable()) {
            cond.ifNotMember(__ -> {
                field.generateNonIgnorableFieldCheck(headerGenerator, structRegistry, "this.", buffer);
            });
        }
        cond.generate(buffer);
    }
    // Unknown tagged fields (carried through from deserialization) are
    // written via RawTaggedFieldWriter and included in the count.
    headerGenerator.addImport(MessageGenerator.RAW_TAGGED_FIELD_WRITER_CLASS);
    buffer.printf("RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);%n");
    buffer.printf("_numTaggedFields += _rawWriter.numFields();%n");
    VersionConditional.forVersions(messageFlexibleVersions, curVersions).ifNotMember(__ -> {
        // Non-flexible versions cannot carry tagged fields at all.
        generateCheckForUnsupportedNumTaggedFields("_numTaggedFields > 0");
    }).ifMember(flexibleVersions -> {
        buffer.printf("_writable.writeUnsignedVarint(_numTaggedFields);%n");
        // Interleave known tagged fields (ascending tag order) with any
        // unknown raw tags that fall in the gaps between known tags.
        int prevTag = -1;
        for (FieldSpec field : taggedFields.values()) {
            if (prevTag + 1 != field.tag().get()) {
                buffer.printf("_rawWriter.writeRawTags(_writable, %d);%n", field.tag().get());
            }
            VersionConditional.forVersions(field.taggedVersions().intersect(field.versions()), flexibleVersions).allowMembershipCheckAlwaysFalse(false).ifMember(presentAndTaggedVersions -> {
                IsNullConditional cond = IsNullConditional.forName(field.camelCaseName()).nullableVersions(field.nullableVersions()).possibleVersions(presentAndTaggedVersions).alwaysEmitBlockScope(true).ifShouldNotBeNull(() -> {
                    // Fields with a non-null default are only written when
                    // they differ from that default (brace opened here is
                    // closed at the bottom of this lambda).
                    if (!field.defaultString().equals("null")) {
                        field.generateNonDefaultValueCheck(headerGenerator, structRegistry, buffer, "this.", Versions.NONE);
                        buffer.incrementIndent();
                    }
                    // Tagged wire format: tag varint, size varint, then payload.
                    buffer.printf("_writable.writeUnsignedVarint(%d);%n", field.tag().get());
                    if (field.type().isString()) {
                        buffer.printf("byte[] _stringBytes = _cache.getSerializedValue(this.%s);%n", field.camelCaseName());
                        headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                        buffer.printf("_writable.writeUnsignedVarint(_stringBytes.length + " + "ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));%n");
                        buffer.printf("_writable.writeUnsignedVarint(_stringBytes.length + 1);%n");
                        buffer.printf("_writable.writeByteArray(_stringBytes);%n");
                    } else if (field.type().isBytes()) {
                        headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                        buffer.printf("_writable.writeUnsignedVarint(this.%s.length + " + "ByteUtils.sizeOfUnsignedVarint(this.%s.length + 1));%n", field.camelCaseName(), field.camelCaseName());
                        buffer.printf("_writable.writeUnsignedVarint(this.%s.length + 1);%n", field.camelCaseName());
                        buffer.printf("_writable.writeByteArray(this.%s);%n", field.camelCaseName());
                    } else if (field.type().isArray()) {
                        headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                        buffer.printf("_writable.writeUnsignedVarint(_cache.getArraySizeInBytes(this.%s));%n", field.camelCaseName());
                        generateVariableLengthWriter(fieldFlexibleVersions(field), field.camelCaseName(), field.type(), presentAndTaggedVersions, Versions.NONE, field.zeroCopy());
                    } else if (field.type().isStruct()) {
                        // Nullable tagged structs reserve one extra size byte
                        // for the presence marker.
                        VersionConditional.forVersions(field.nullableVersions(), presentAndTaggedVersions).ifMember(___ -> {
                            buffer.printf("_writable.writeUnsignedVarint(this.%s.size(_cache, _version) + 1);%n", field.camelCaseName());
                            buffer.printf("_writable.writeUnsignedVarint(1);%n");
                        }).ifNotMember(___ -> {
                            buffer.printf("_writable.writeUnsignedVarint(this.%s.size(_cache, _version));%n", field.camelCaseName());
                        }).generate(buffer);
                        buffer.printf("%s;%n", primitiveWriteExpression(field.type(), field.camelCaseName()));
                    } else if (field.type().isRecords()) {
                        throw new RuntimeException("Unsupported attempt to declare field `" + field.name() + "` with `records` type as a tagged field.");
                    } else {
                        buffer.printf("_writable.writeUnsignedVarint(%d);%n", field.type().fixedLength().get());
                        buffer.printf("%s;%n", primitiveWriteExpression(field.type(), field.camelCaseName()));
                    }
                    if (!field.defaultString().equals("null")) {
                        buffer.decrementIndent();
                        buffer.printf("}%n");
                    }
                });
                // A null value on a field whose default is non-null must be
                // written explicitly as a zero-length entry.
                if (!field.defaultString().equals("null")) {
                    cond.ifNull(() -> {
                        buffer.printf("_writable.writeUnsignedVarint(%d);%n", field.tag().get());
                        buffer.printf("_writable.writeUnsignedVarint(1);%n");
                        buffer.printf("_writable.writeUnsignedVarint(0);%n");
                    });
                }
                cond.generate(buffer);
            }).generate(buffer);
            prevTag = field.tag().get();
        }
        // Flush any remaining unknown raw tags after the last known tag.
        if (prevTag < Integer.MAX_VALUE) {
            buffer.printf("_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);%n");
        }
    }).generate(buffer);
    buffer.decrementIndent();
    buffer.printf("}%n");
}
212626.621236kafka
/**
 * Verifies out-of-order handling in a foreign-key table-table join:
 * out-of-order updates and deletes on a side are ignored if and only if that
 * side's table is versioned ({@code leftVersioned} / {@code rightVersioned}).
 * When {@code materialized} is set, the result store contents are checked
 * alongside the output topic after each step. Record order and timestamps
 * below are the whole point of the test — do not reorder.
 */
public void shouldIgnoreOutOfOrderRecordsIffVersioned() {
    final Topology topology = getTopology(streamsConfig, materialized ? "store" : null, leftJoin, rejoin, leftVersioned, rightVersioned);
    try (final TopologyTestDriver driver = new TopologyTestDriver(topology, streamsConfig)) {
        final TestInputTopic<String, String> right = driver.createInputTopic(RIGHT_TABLE, new StringSerializer(), new StringSerializer());
        final TestInputTopic<String, String> left = driver.createInputTopic(LEFT_TABLE, new StringSerializer(), new StringSerializer());
        final TestOutputTopic<String, String> outputTopic = driver.createOutputTopic(OUTPUT, new StringDeserializer(), new StringDeserializer());
        final KeyValueStore<String, String> store = driver.getKeyValueStore("store");
        // RHS arrives first (ts+4); no LHS yet, so no join output.
        right.pipeInput("rhs1", "rhsValue1", baseTimestamp + 4);
        assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
        if (materialized) {
            assertThat(asMap(store), is(emptyMap()));
        }
        // Two LHS rows referencing rhs1 (ts+3, ts+5) -> both join.
        left.pipeInput("lhs1", "lhsValue1|rhs1", baseTimestamp + 3);
        left.pipeInput("lhs2", "lhsValue2|rhs1", baseTimestamp + 5);
        {
            final Map<String, String> expected = mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"), mkEntry("lhs2", "(lhsValue2|rhs1,rhsValue1)"));
            assertThat(outputTopic.readKeyValuesToMap(), is(expected));
            if (materialized) {
                assertThat(asMap(store), is(expected));
            }
        }
        // In-order delete of lhs2 (ts+6) retracts its join result.
        left.pipeInput("lhs2", null, baseTimestamp + 6);
        {
            assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs2", null))));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"))));
            }
        }
        // Out-of-order LHS updates (ts+2, older than the records above):
        // ignored when the left table is versioned, applied otherwise.
        left.pipeInput("lhs1", "lhsValue1_ooo|rhs1", baseTimestamp + 2);
        left.pipeInput("lhs2", "lhsValue2_ooo|rhs1", baseTimestamp + 2);
        if (leftVersioned) {
            assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"))));
            }
        } else {
            final Map<String, String> expected = mkMap(mkEntry("lhs1", "(lhsValue1_ooo|rhs1,rhsValue1)"), mkEntry("lhs2", "(lhsValue2_ooo|rhs1,rhsValue1)"));
            assertThat(outputTopic.readKeyValuesToMap(), is(expected));
            if (materialized) {
                assertThat(asMap(store), is(expected));
            }
        }
        // Out-of-order LHS delete (ts+2): same versioned/unversioned split.
        left.pipeInput("lhs1", null, baseTimestamp + 2);
        if (leftVersioned) {
            assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1|rhs1,rhsValue1)"))));
            }
        } else {
            assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", null))));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs2", "(lhsValue2_ooo|rhs1,rhsValue1)"))));
            }
        }
        // Fresh in-order LHS updates (ts+8) bring both keys back, so the
        // following RHS checks start from a known state in every variant.
        left.pipeInput("lhs1", "lhsValue1_new|rhs1", baseTimestamp + 8);
        left.pipeInput("lhs2", "lhsValue2_new|rhs1", baseTimestamp + 8);
        {
            final Map<String, String> expected = mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,rhsValue1)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,rhsValue1)"));
            assertThat(outputTopic.readKeyValuesToMap(), is(expected));
            if (materialized) {
                assertThat(asMap(store), is(expected));
            }
        }
        // Out-of-order RHS update (ts+1): ignored when the right table is
        // versioned, otherwise it re-joins both LHS rows with the old value.
        right.pipeInput("rhs1", "rhsValue1_ooo", baseTimestamp + 1);
        if (rightVersioned) {
            assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,rhsValue1)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,rhsValue1)"))));
            }
        } else {
            assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,rhsValue1_ooo)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,rhsValue1_ooo)"))));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,rhsValue1_ooo)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,rhsValue1_ooo)"))));
            }
        }
        // Out-of-order RHS delete (ts+1): if applied (unversioned right),
        // left join emits null-RHS results while inner join retracts.
        right.pipeInput("rhs1", null, baseTimestamp + 1);
        if (rightVersioned) {
            assertThat(outputTopic.readKeyValuesToMap(), is(emptyMap()));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,rhsValue1)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,rhsValue1)"))));
            }
        } else {
            if (leftJoin) {
                assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,null)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,null)"))));
                if (materialized) {
                    assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,null)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,null)"))));
                }
            } else {
                assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", null), mkEntry("lhs2", null))));
                if (materialized) {
                    assertThat(asMap(store), is(emptyMap()));
                }
            }
        }
        // In-order RHS update (ts+6) converges every variant to the same
        // final joined state.
        right.pipeInput("rhs1", "rhsValue1_new", baseTimestamp + 6);
        {
            assertThat(outputTopic.readKeyValuesToMap(), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,rhsValue1_new)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,rhsValue1_new)"))));
            if (materialized) {
                assertThat(asMap(store), is(mkMap(mkEntry("lhs1", "(lhsValue1_new|rhs1,rhsValue1_new)"), mkEntry("lhs2", "(lhsValue2_new|rhs1,rhsValue1_new)"))));
            }
        }
    }
}
212993.2138177wildfly
/**
 * Parses one {@code <xa-datasource>} element (datasources schema 3.0) into a
 * management ADD operation for the xa-data-source resource, plus one ADD
 * operation per nested {@code <xa-datasource-property>}. The datasource ADD
 * is appended to {@code list} first, followed by the property ADDs, so the
 * resource exists before its properties are added.
 *
 * Ordering caveat: this is a streaming (StAX) parser — rawAttributeText and
 * rawElementText depend on the reader's current position, so statements here
 * must not be reordered.
 *
 * @param reader        StAX reader positioned on the xa-datasource start tag
 * @param list          receives the generated ADD operations
 * @param parentAddress address of the enclosing datasources subsystem
 * @throws XMLStreamException on underlying reader failures
 * @throws ParserException    on unexpected tags or premature end of document
 * @throws ValidateException  propagated from attribute/element validation
 */
private void parseXADataSource_3_0(XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // Pass 1: XML attributes of the xa-datasource element.
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final XaDataSource.Attribute attribute = XaDataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    // Required: parsed unconditionally (no null check).
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // Captured locally — it becomes the resource address below
                    // rather than an operation parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case CONNECTABLE:
                {
                    final String value = rawAttributeText(reader, CONNECTABLE.getXmlName());
                    if (value != null) {
                        CONNECTABLE.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case TRACKING:
                {
                    final String value = rawAttributeText(reader, TRACKING.getXmlName());
                    if (value != null) {
                        TRACKING.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // statistics-enabled is matched by local name here —
                // NOTE(review): presumably not a member of the XaDataSource
                // Attribute enum for this schema version; confirm before
                // folding it into the switch. Anything else is rejected.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Resource address: <parent>/xa-datasource=<poolName>.
    // NOTE(review): poolName stays null if the pool-name attribute is absent
    // — TODO confirm that is validated elsewhere before the op executes.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(XA_DATASOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // Property ADD ops are buffered so they can be appended after the
    // datasource ADD itself (see END_ELEMENT below).
    List<ModelNode> xadatasourcePropertiesOperations = new ArrayList<ModelNode>(0);
    // Pass 2: child elements until the matching </xa-datasource>.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.XA_DATASOURCE) {
                        // Normal exit: datasource ADD first, then properties.
                        list.add(operation);
                        list.addAll(xadatasourcePropertiesOperations);
                        return;
                    } else {
                        // Other end tags are fine only if they close a known
                        // child element.
                        if (XaDataSource.Tag.forName(reader.getLocalName()) == XaDataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(XaDataSource.Tag.forName(reader.getLocalName())) {
                        case XA_DATASOURCE_PROPERTY:
                            {
                                // Each property becomes its own ADD op at
                                // <dsAddress>/xa-datasource-properties=<name>.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(XADATASOURCE_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                XADATASOURCE_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                xadatasourcePropertiesOperations.add(configOperation);
                                break;
                            }
                        case XA_DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                XA_DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case XA_POOL:
                            {
                                // Complex children are delegated to dedicated
                                // sub-parsers that mutate the same operation.
                                parseXaPool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_PROPERTY:
                            {
                                String value = rawElementText(reader);
                                URL_PROPERTY.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                parseDsSecurity(reader, operation);
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        case RECOVERY:
                            {
                                parseRecovery(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Reader exhausted without seeing </xa-datasource>.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
213689.137164wildfly
public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    // Resolves which security domain each EJB component in this deployment should use
    // and installs an EJBSecurityDomainService wired to the selected domain.
    // NOTE(review): resolution order (pre-attached domain -> per-component defined
    // domain -> virtual domain -> top-level/default fallback) is load-bearing; the
    // branches below must not be reordered.
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    final EEModuleDescription eeModuleDescription = deploymentUnit.getAttachment(EE_MODULE_DESCRIPTION);
    // Not an EE module: nothing to secure.
    if (eeModuleDescription == null) {
        return;
    }
    final Collection<ComponentDescription> componentDescriptions = eeModuleDescription.getComponentDescriptions();
    // No components means no EJBs to configure.
    if (componentDescriptions == null || componentDescriptions.isEmpty()) {
        return;
    }
    // Module-level default domain wins over the processor-wide default name.
    final String defaultSecurityDomain;
    if (eeModuleDescription.getDefaultSecurityDomain() == null) {
        defaultSecurityDomain = this.defaultSecurityDomainName.get();
    } else {
        defaultSecurityDomain = eeModuleDescription.getDefaultSecurityDomain();
    }
    final CapabilityServiceSupport support = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.CAPABILITY_SERVICE_SUPPORT);
    final SecurityMetaData securityMetaData = deploymentUnit.getAttachment(ATTACHMENT_KEY);
    // A domain may already have been selected by earlier processing (e.g. WAR metadata).
    ServiceName elytronDomainServiceName = securityMetaData != null ? securityMetaData.getSecurityDomain() : null;
    final ServiceName ejbSecurityDomainServiceName = deploymentUnit.getServiceName().append(EJBSecurityDomainService.SERVICE_NAME);
    // this.apply(...) maps a domain name onto an application-security-domain config, if one exists.
    final ApplicationSecurityDomainConfig defaultDomainMapping = this.apply(defaultSecurityDomain);
    final ServiceName defaultElytronDomainServiceName;
    if (defaultDomainMapping != null) {
        defaultElytronDomainServiceName = support.getCapabilityServiceName(ApplicationSecurityDomainDefinition.APPLICATION_SECURITY_DOMAIN_CAPABILITY_NAME, defaultSecurityDomain).append("security-domain");
    } else {
        defaultElytronDomainServiceName = null;
    }
    ApplicationSecurityDomainConfig selectedElytronDomainConfig = null;
    VirtualDomainMetaData virtualDomainMetaData = null;
    boolean isDefinedSecurityDomainVirtual = false;
    if (elytronDomainServiceName == null) {
        // No pre-attached domain: inspect every EJB component to discover one.
        String selectedElytronDomainName = null;
        boolean legacyDomainDefined = false;
        boolean defaultRequired = false;
        for (ComponentDescription componentDescription : componentDescriptions) {
            if (componentDescription instanceof EJBComponentDescription) {
                EJBComponentDescription ejbComponentDescription = (EJBComponentDescription) componentDescription;
                ejbComponentDescription.setDefaultSecurityDomain(defaultSecurityDomain);
                String definedSecurityDomain = ejbComponentDescription.getDefinedSecurityDomain();
                // Any component without an explicit domain will need the default resolved.
                defaultRequired = defaultRequired || definedSecurityDomain == null;
                ApplicationSecurityDomainConfig definedDomainMapping = definedSecurityDomain != null ? this.apply(definedSecurityDomain) : null;
                if (definedDomainMapping != null) {
                    if (selectedElytronDomainName == null) {
                        selectedElytronDomainName = definedSecurityDomain;
                        selectedElytronDomainConfig = definedDomainMapping;
                    } else if (selectedElytronDomainName.equals(definedSecurityDomain) == false) {
                        // Only one mapped Elytron domain is supported per deployment.
                        throw EjbLogger.ROOT_LOGGER.multipleSecurityDomainsDetected();
                    }
                } else if (definedSecurityDomain != null) {
                    // Unmapped explicit domain: it may be a virtual domain, else it is legacy.
                    virtualDomainMetaData = getVirtualDomainMetaData(definedSecurityDomain, phaseContext);
                    if (virtualDomainMetaData != null) {
                        elytronDomainServiceName = VirtualDomainMarkerUtility.virtualDomainName(definedSecurityDomain);
                        isDefinedSecurityDomainVirtual = true;
                    }
                    if (elytronDomainServiceName != null) {
                        selectedElytronDomainName = definedSecurityDomain;
                    } else {
                        legacyDomainDefined = true;
                    }
                }
            }
        }
        final boolean useDefaultElytronMapping;
        if (defaultRequired && selectedElytronDomainName == null) {
            // Fall back: first to the top-level deployment's domain, then to the default mapping.
            DeploymentUnit topLevelDeployment = toRoot(deploymentUnit);
            final SecurityMetaData topLevelSecurityMetaData = topLevelDeployment.getAttachment(ATTACHMENT_KEY);
            ServiceName topLevelElytronDomainServiceName = topLevelSecurityMetaData != null ? topLevelSecurityMetaData.getSecurityDomain() : null;
            if (topLevelElytronDomainServiceName != null) {
                elytronDomainServiceName = topLevelElytronDomainServiceName;
                useDefaultElytronMapping = true;
            } else if (defaultDomainMapping != null) {
                selectedElytronDomainName = defaultSecurityDomain;
                selectedElytronDomainConfig = defaultDomainMapping;
                elytronDomainServiceName = defaultElytronDomainServiceName;
                // A legacy-defined domain on some component prevents blanket default mapping.
                useDefaultElytronMapping = !legacyDomainDefined;
            } else {
                useDefaultElytronMapping = false;
            }
        } else {
            useDefaultElytronMapping = false;
        }
        if (selectedElytronDomainConfig != null) {
            // Mapped application-security-domain case: install the service against the
            // capability-provided ApplicationSecurityDomain.
            final EJBSecurityDomainService ejbSecurityDomainService = new EJBSecurityDomainService(deploymentUnit);
            ServiceName applicationSecurityDomainServiceName = support.getCapabilityServiceName(ApplicationSecurityDomainDefinition.APPLICATION_SECURITY_DOMAIN_CAPABILITY_NAME, selectedElytronDomainName);
            elytronDomainServiceName = applicationSecurityDomainServiceName.append("security-domain");
            final ServiceBuilder<Void> builder = phaseContext.getServiceTarget().addService(ejbSecurityDomainServiceName, ejbSecurityDomainService).addDependency(applicationSecurityDomainServiceName, ApplicationSecurityDomain.class, ejbSecurityDomainService.getApplicationSecurityDomainInjector());
            builder.install();
            for (final ComponentDescription componentDescription : componentDescriptions) {
                if (componentDescription instanceof EJBComponentDescription) {
                    EJBComponentDescription ejbComponentDescription = (EJBComponentDescription) componentDescription;
                    String definedSecurityDomain = ejbComponentDescription.getDefinedSecurityDomain();
                    if (useDefaultElytronMapping || selectedElytronDomainName.equals(definedSecurityDomain)) {
                        // Component explicitly (or by default mapping) uses the selected domain.
                        ejbComponentDescription.setOutflowSecurityDomainsConfigured(this);
                        ejbComponentDescription.setSecurityDomainServiceName(elytronDomainServiceName);
                        ejbComponentDescription.setRequiresJacc(selectedElytronDomainConfig.isEnableJacc());
                        ejbComponentDescription.setLegacyCompliantPrincipalPropagation(selectedElytronDomainConfig.isLegacyCompliantPrincipalPropagation());
                        ejbComponentDescription.getConfigurators().add((context, description, configuration) -> configuration.getCreateDependencies().add((serviceBuilder, service) -> serviceBuilder.requires(ejbSecurityDomainServiceName)));
                    } else if (definedSecurityDomain == null && defaultDomainMapping != null) {
                        // Component with no explicit domain falls back onto the default mapping.
                        ejbComponentDescription.setOutflowSecurityDomainsConfigured(this);
                        ejbComponentDescription.setSecurityDomainServiceName(defaultElytronDomainServiceName);
                        ejbComponentDescription.setRequiresJacc(defaultDomainMapping.isEnableJacc());
                        ejbComponentDescription.setLegacyCompliantPrincipalPropagation(defaultDomainMapping.isLegacyCompliantPrincipalPropagation());
                        ejbComponentDescription.getConfigurators().add((context, description, configuration) -> configuration.getCreateDependencies().add((serviceBuilder, service) -> serviceBuilder.requires(ejbSecurityDomainServiceName)));
                    }
                }
            }
        } else if (elytronDomainServiceName != null) {
            // Direct (possibly virtual) Elytron SecurityDomain case.
            final EJBSecurityDomainService ejbSecurityDomainService = new EJBSecurityDomainService(deploymentUnit);
            if (isDefinedSecurityDomainVirtual && !VirtualDomainUtil.isVirtualDomainCreated(deploymentUnit)) {
                VirtualDomainUtil.createVirtualDomain(phaseContext.getServiceRegistry(), virtualDomainMetaData, elytronDomainServiceName, phaseContext.getServiceTarget());
            }
            final ServiceBuilder<Void> builder = phaseContext.getServiceTarget().addService(ejbSecurityDomainServiceName, ejbSecurityDomainService).addDependency(elytronDomainServiceName, SecurityDomain.class, ejbSecurityDomainService.getSecurityDomainInjector());
            builder.install();
            for (final ComponentDescription componentDescription : componentDescriptions) {
                if (componentDescription instanceof EJBComponentDescription) {
                    EJBComponentDescription ejbComponentDescription = (EJBComponentDescription) componentDescription;
                    ejbComponentDescription.setSecurityDomainServiceName(elytronDomainServiceName);
                    ejbComponentDescription.setOutflowSecurityDomainsConfigured(this);
                    componentDescription.getConfigurators().add((context, description, configuration) -> configuration.getCreateDependencies().add((serviceBuilder, service) -> serviceBuilder.requires(ejbSecurityDomainServiceName)));
                }
            }
        }
    } else {
        // A security domain was already attached to this deployment unit: wire every
        // EJB component directly to it.
        final EJBSecurityDomainService ejbSecurityDomainService = new EJBSecurityDomainService(deploymentUnit);
        final ServiceBuilder<Void> builder = phaseContext.getServiceTarget().addService(ejbSecurityDomainServiceName, ejbSecurityDomainService).addDependency(elytronDomainServiceName, SecurityDomain.class, ejbSecurityDomainService.getSecurityDomainInjector());
        builder.install();
        for (ComponentDescription componentDescription : componentDescriptions) {
            if (componentDescription instanceof EJBComponentDescription) {
                EJBComponentDescription ejbComponentDescription = (EJBComponentDescription) componentDescription;
                ejbComponentDescription.setSecurityDomainServiceName(elytronDomainServiceName);
                ejbComponentDescription.setOutflowSecurityDomainsConfigured(this);
                componentDescription.getConfigurators().add((context, description, configuration) -> configuration.getCreateDependencies().add((serviceBuilder, service) -> serviceBuilder.requires(ejbSecurityDomainServiceName)));
            }
        }
    }
}
216103.0516195wildfly
private void testFailover(Lifecycle lifecycle, URL baseURL1, URL baseURL2, URL baseURL3) throws Exception {
    // Exercises HTTP session failover across a three-node cluster: establish a session,
    // then stop/restart NODE_1 and NODE_2 in turn, verifying after each transition that
    // the session value keeps incrementing and that session ownership (the route header)
    // moves as expected. NOTE(review): assertion expectations depend on the exact request
    // order and on cacheMode.needsStateTransfer() — do not reorder requests.
    URI uri1 = SimpleServlet.createURI(baseURL1);
    URI uri2 = SimpleServlet.createURI(baseURL2);
    URI uri3 = SimpleServlet.createURI(baseURL3);
    this.establishTopology(baseURL1, THREE_NODES);
    // Monotonic counter the servlet echoes back; incremented once per request.
    int value = 1;
    // Tracks the most recently observed session owner node.
    String lastOwner;
    try (CloseableHttpClient client = TestHttpClientUtils.promiscuousCookieHttpClient()) {
        // Phase 1: create the session on NODE_1 and touch it via all three nodes.
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri1))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            Assert.assertNotNull(entry);
            Assert.assertEquals(NODE_1, entry.getValue());
            lastOwner = entry.getValue();
            Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
        }
        this.nonTxWait.run();
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri2))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            // With state transfer the session is available everywhere, so no re-route occurs.
            if (!this.cacheMode.needsStateTransfer()) {
                Assert.assertNotNull(entry);
                Assert.assertEquals(NODE_2, entry.getValue());
                lastOwner = entry.getValue();
            } else {
                Assert.assertNull(entry);
            }
        }
        this.nonTxWait.run();
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri3))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (!this.cacheMode.needsStateTransfer()) {
                Assert.assertNotNull(entry);
                Assert.assertEquals(NODE_3, entry.getValue());
                lastOwner = entry.getValue();
            } else {
                Assert.assertNull(entry);
            }
        }
        this.nonTxWait.run();
        // Phase 2: fail NODE_1; ownership must move off the stopped node.
        lifecycle.stop(NODE_1);
        this.establishTopology(baseURL2, NODE_2, NODE_3);
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri2))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            Assert.assertNotNull(entry);
            Assert.assertNotEquals(lastOwner, entry.getValue());
            lastOwner = entry.getValue();
            Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri2))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            // A second re-route may or may not happen; only validate it when it does.
            if (entry != null) {
                Assert.assertNotEquals(lastOwner, entry.getValue());
                lastOwner = entry.getValue();
                Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
            }
        }
        this.nonTxWait.run();
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri3))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (!this.cacheMode.needsStateTransfer()) {
                Assert.assertNotNull(entry);
                Assert.assertEquals(NODE_3, entry.getValue());
            } else {
                Assert.assertNull(entry);
            }
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri3))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            // Repeat request to the same node must not trigger another re-route.
            Assert.assertNull(entry);
        }
        // Phase 3: restart NODE_1 and confirm the session still increments correctly.
        lifecycle.start(NODE_1);
        this.establishTopology(baseURL2, THREE_NODES);
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri2))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (!this.cacheMode.needsStateTransfer()) {
                Assert.assertNotNull(entry);
                Assert.assertEquals(NODE_2, entry.getValue());
            } else if (entry != null) {
                Assert.assertNotEquals(lastOwner, entry.getValue());
                lastOwner = entry.getValue();
                Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
            }
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri2))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            Assert.assertNull(entry);
        }
        this.nonTxWait.run();
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri3))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (!this.cacheMode.needsStateTransfer()) {
                Assert.assertNotNull(entry);
                Assert.assertEquals(NODE_3, entry.getValue());
            } else {
                Assert.assertNull(entry);
            }
        }
        this.nonTxWait.run();
        // Phase 4: fail NODE_2; again ownership must migrate.
        lifecycle.stop(NODE_2);
        this.establishTopology(baseURL1, NODE_1, NODE_3);
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri1))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (entry != null) {
                Assert.assertNotEquals(lastOwner, entry.getValue());
                lastOwner = entry.getValue();
                Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
            }
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri1))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (entry != null) {
                Assert.assertNotEquals(lastOwner, entry.getValue());
                lastOwner = entry.getValue();
                Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
            }
        }
        this.nonTxWait.run();
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri3))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (!this.cacheMode.needsStateTransfer()) {
                Assert.assertNotNull(entry);
                Assert.assertEquals(NODE_3, entry.getValue());
            } else {
                if (entry != null) {
                    Assert.assertNotEquals(lastOwner, entry.getValue());
                    lastOwner = entry.getValue();
                    Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
                }
            }
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri3))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (entry != null) {
                Assert.assertNotEquals(lastOwner, entry.getValue());
                lastOwner = entry.getValue();
                Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
            }
        }
        // Phase 5: restart NODE_2, verify full topology again, then invalidate the session.
        lifecycle.start(NODE_2);
        this.establishTopology(baseURL1, THREE_NODES);
        this.nonTxWait.run();
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri1))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            Assert.assertEquals(value++, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (!this.cacheMode.needsStateTransfer()) {
                Assert.assertNotNull(entry);
                Assert.assertEquals(NODE_1, entry.getValue());
            } else if (entry != null) {
                Assert.assertNotEquals(lastOwner, entry.getValue());
                Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
            }
        }
        try (CloseableHttpResponse response = client.execute(new HttpGet(uri1))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
            // Final read: no increment after this, so compare against value (not value++).
            Assert.assertEquals(value, Integer.parseInt(response.getFirstHeader(SimpleServlet.VALUE_HEADER).getValue()));
            Map.Entry<String, String> entry = parseSessionRoute(response);
            if (entry != null) {
                Assert.assertNotEquals(lastOwner, entry.getValue());
                lastOwner = entry.getValue();
                Assert.assertEquals(entry.getKey(), response.getFirstHeader(SimpleServlet.SESSION_ID_HEADER).getValue());
            }
        }
        // DELETE invalidates the session; only the status code matters here.
        try (CloseableHttpResponse response = client.execute(new HttpDelete(uri1))) {
            Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        }
    }
}
223758.681246elasticsearch
public void testParseOGCPolygonWithHoles() throws IOException, ParseException {
    // Verifies GeoJSON polygons with holes under OGC (right-hand-rule) winding.
    // Each scenario parses the same JSON twice: once through the S4J shape builder
    // and once through the lightweight parse(...) path, asserting both agree on
    // whether the result is a simple polygon or is split into a multi-polygon
    // (which happens when the shell crosses the dateline).

    // Scenario 1: cw shell, cw hole — stays a single polygon.
    String cwShellCwHole = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(176.0).value(15.0).endArray().startArray().value(-177.0).value(10.0).endArray().startArray().value(-177.0).value(-10.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(172.0).value(0.0).endArray().startArray().value(176.0).value(15.0).endArray().endArray().startArray().startArray().value(-172.0).value(8.0).endArray().startArray().value(174.0).value(10.0).endArray().startArray().value(-172.0).value(-8.0).endArray().startArray().value(-172.0).value(8.0).endArray().endArray().endArray().endObject());
    assertPolygonScenario(cwShellCwHole, false);

    // Scenario 2: ccw shell crossing the dateline — split into a multi-polygon.
    String ccwShellDateline = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(-177.0).value(10.0).endArray().startArray().value(176.0).value(15.0).endArray().startArray().value(172.0).value(0.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(-177.0).value(-10.0).endArray().startArray().value(-177.0).value(10.0).endArray().endArray().startArray().startArray().value(178.0).value(8.0).endArray().startArray().value(-178.0).value(8.0).endArray().startArray().value(-180.0).value(-8.0).endArray().startArray().value(178.0).value(8.0).endArray().endArray().endArray().endObject());
    assertPolygonScenario(ccwShellDateline, true);

    // Scenario 3: shell entirely within one hemisphere — single polygon.
    String singleHemisphere = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(176.0).value(15.0).endArray().startArray().value(180.0).value(10.0).endArray().startArray().value(179.0).value(-10.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(172.0).value(0.0).endArray().startArray().value(176.0).value(15.0).endArray().endArray().startArray().startArray().value(177.0).value(8.0).endArray().startArray().value(179.0).value(10.0).endArray().startArray().value(179.0).value(-8.0).endArray().startArray().value(177.0).value(8.0).endArray().endArray().endArray().endObject());
    assertPolygonScenario(singleHemisphere, false);

    // Scenario 4: longitudes beyond 180 force dateline normalization — multi-polygon.
    String beyondDateline = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(183.0).value(10.0).endArray().startArray().value(183.0).value(-10.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(172.0).value(0.0).endArray().startArray().value(176.0).value(15.0).endArray().startArray().value(183.0).value(10.0).endArray().endArray().startArray().startArray().value(178.0).value(8.0).endArray().startArray().value(182.0).value(8.0).endArray().startArray().value(180.0).value(-8.0).endArray().startArray().value(178.0).value(8.0).endArray().endArray().endArray().endObject());
    assertPolygonScenario(beyondDateline, true);
}

/**
 * Parses {@code geoJson} via both the S4J builder and the plain parse path and asserts
 * the expected polygon vs. multi-polygon classification on each.
 */
private void assertPolygonScenario(String geoJson, boolean expectMultiPolygon) throws IOException, ParseException {
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, geoJson)) {
        parser.nextToken();
        Shape shape = ShapeParser.parse(parser).buildS4J();
        if (expectMultiPolygon) {
            ElasticsearchGeoAssertions.assertMultiPolygon(shape, true);
        } else {
            ElasticsearchGeoAssertions.assertPolygon(shape, true);
        }
    }
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, geoJson)) {
        parser.nextToken();
        if (expectMultiPolygon) {
            ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false);
        } else {
            ElasticsearchGeoAssertions.assertPolygon(parse(parser), false);
        }
    }
}
222402.9241161elasticsearch
public void add(CommonStats stats) {
    // Merges every stat category from the given CommonStats into this instance.
    // Pattern for most categories: if we already track the category, merge
    // unconditionally (the incoming value — possibly null — is handed straight to
    // the per-category add(), matching the original behavior); otherwise start
    // tracking only when the incoming stats actually carry that category.
    if (docs != null) {
        docs.add(stats.getDocs());
    } else if (stats.getDocs() != null) {
        docs = new DocsStats();
        docs.add(stats.getDocs());
    }
    if (store != null) {
        store.add(stats.getStore());
    } else if (stats.getStore() != null) {
        store = new StoreStats();
        store.add(stats.getStore());
    }
    if (indexing != null) {
        indexing.add(stats.getIndexing());
    } else if (stats.getIndexing() != null) {
        indexing = new IndexingStats();
        indexing.add(stats.getIndexing());
    }
    if (get != null) {
        get.add(stats.getGet());
    } else if (stats.getGet() != null) {
        get = new GetStats();
        get.add(stats.getGet());
    }
    if (search != null) {
        search.add(stats.getSearch());
    } else if (stats.getSearch() != null) {
        search = new SearchStats();
        search.add(stats.getSearch());
    }
    if (merge != null) {
        merge.add(stats.getMerge());
    } else if (stats.getMerge() != null) {
        merge = new MergeStats();
        merge.add(stats.getMerge());
    }
    if (refresh != null) {
        refresh.add(stats.getRefresh());
    } else if (stats.getRefresh() != null) {
        refresh = new RefreshStats();
        refresh.add(stats.getRefresh());
    }
    if (flush != null) {
        flush.add(stats.getFlush());
    } else if (stats.getFlush() != null) {
        flush = new FlushStats();
        flush.add(stats.getFlush());
    }
    if (warmer != null) {
        warmer.add(stats.getWarmer());
    } else if (stats.getWarmer() != null) {
        warmer = new WarmerStats();
        warmer.add(stats.getWarmer());
    }
    if (queryCache != null) {
        queryCache.add(stats.getQueryCache());
    } else if (stats.getQueryCache() != null) {
        queryCache = new QueryCacheStats();
        queryCache.add(stats.getQueryCache());
    }
    if (fieldData != null) {
        fieldData.add(stats.getFieldData());
    } else if (stats.getFieldData() != null) {
        fieldData = new FieldDataStats();
        fieldData.add(stats.getFieldData());
    }
    if (completion != null) {
        completion.add(stats.getCompletion());
    } else if (stats.getCompletion() != null) {
        completion = new CompletionStats();
        completion.add(stats.getCompletion());
    }
    if (segments != null) {
        segments.add(stats.getSegments());
    } else if (stats.getSegments() != null) {
        segments = new SegmentsStats();
        segments.add(stats.getSegments());
    }
    if (translog != null) {
        translog.add(stats.getTranslog());
    } else if (stats.getTranslog() != null) {
        translog = new TranslogStats();
        translog.add(stats.getTranslog());
    }
    if (requestCache != null) {
        requestCache.add(stats.getRequestCache());
    } else if (stats.getRequestCache() != null) {
        requestCache = new RequestCacheStats();
        requestCache.add(stats.getRequestCache());
    }
    if (recoveryStats != null) {
        recoveryStats.add(stats.getRecoveryStats());
    } else if (stats.getRecoveryStats() != null) {
        recoveryStats = new RecoveryStats();
        recoveryStats.add(stats.getRecoveryStats());
    }
    if (bulk != null) {
        bulk.add(stats.getBulk());
    } else if (stats.getBulk() != null) {
        bulk = new BulkStats();
        bulk.add(stats.getBulk());
    }
    // Shards merge via a value-returning add(); adopt the incoming instance when
    // we have none of our own (no defensive copy, per the original).
    if (stats.shards != null) {
        shards = (shards == null) ? stats.shards : shards.add(stats.shards);
    }
    // Node mappings are only merged when present on the incoming stats; a fresh
    // accumulator is created on first sight.
    if (stats.getNodeMappings() != null) {
        if (nodeMappings == null) {
            nodeMappings = new NodeMappingStats();
        }
        nodeMappings.add(stats.getNodeMappings());
    }
    if (denseVectorStats != null) {
        denseVectorStats.add(stats.getDenseVectorStats());
    } else if (stats.getDenseVectorStats() != null) {
        denseVectorStats = new DenseVectorStats();
        denseVectorStats.add(stats.getDenseVectorStats());
    }
}
225278.1723157elasticsearch
/**
 * End-to-end check of the desired-balance response: builds a random cluster state
 * (1-32 data-only nodes; up to 8 random indices whose shard copies are randomly
 * started, replicated, unassigned or relocating), stubs the desired-balance
 * allocator with random shard assignments and stats, then verifies that the
 * response mirrors the routing table, the desired assignments, the stats and the
 * cluster info, and that it round-trips through wire serialization with the
 * expected chunk count.
 */
public void testGetDesiredBalance() throws Exception {
    // 1-32 unique data-only node ids.
    Set<String> nodeIds = randomUnique(() -> randomAlphaOfLength(8), randomIntBetween(1, 32));
    DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder();
    for (String nodeId : nodeIds) {
        discoveryNodes.add(newNode(nodeId, Set.of(DiscoveryNodeRole.DATA_ROLE)));
    }
    // Metadata configured to use the desired-balance allocator (helper defined elsewhere in this class).
    Metadata.Builder metadataBuilder = metadataWithConfiguredAllocator(DESIRED_BALANCE_ALLOCATOR);
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    // Up to 8 random indices, each optionally carrying a tier preference and
    // write-load / shard-size forecasts.
    for (int i = 0; i < randomInt(8); i++) {
        String indexName = randomAlphaOfLength(8);
        Settings.Builder settings = indexSettings(IndexVersion.current(), 1, 0);
        if (randomBoolean()) {
            settings.put(DataTier.TIER_PREFERENCE_SETTING.getKey(), randomFrom("data_hot", "data_warm", "data_cold"));
        }
        IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName).settings(settings);
        if (randomBoolean()) {
            indexMetadataBuilder.indexWriteLoadForecast(randomDoubleBetween(0.0, 8.0, true));
        }
        if (randomBoolean()) {
            indexMetadataBuilder.shardSizeInBytesForecast(randomLongBetween(0, 1024));
        }
        IndexMetadata indexMetadata = indexMetadataBuilder.build();
        Index index = indexMetadata.getIndex();
        metadataBuilder.put(indexMetadata, false);
        IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index);
        // 1-16 routing entries per index; each shard id j gets one of four random layouts.
        for (int j = 0; j < randomIntBetween(1, 16); j++) {
            String nodeId = randomFrom(nodeIds);
            switch(randomInt(3)) {
                // started primary only
                case 0 ->
                    indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting(new ShardId(index, j), nodeId, true, ShardRoutingState.STARTED));
                // started primary, plus (when more than one node exists) a started replica on another node
                case 1 ->
                    {
                        indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting(new ShardId(index, j), nodeId, true, ShardRoutingState.STARTED));
                        if (nodeIds.size() > 1) {
                            indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting(new ShardId(index, j), randomValueOtherThan(nodeId, () -> randomFrom(nodeIds)), false, ShardRoutingState.STARTED));
                        }
                    }
                // unassigned primary
                case 2 ->
                    indexRoutingTableBuilder.addShard(TestShardRouting.newShardRouting(new ShardId(index, j), null, true, ShardRoutingState.UNASSIGNED));
                // primary relocating to another node (started in place when only one node exists)
                case 3 ->
                    {
                        ShardRouting shard = TestShardRouting.newShardRouting(new ShardId(index, j), nodeId, true, ShardRoutingState.STARTED);
                        if (nodeIds.size() > 1) {
                            shard = shard.relocate(randomValueOtherThan(nodeId, () -> randomFrom(nodeIds)), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
                        }
                        indexRoutingTableBuilder.addShard(shard);
                    }
            }
        }
        routingTableBuilder.add(indexRoutingTableBuilder.build());
    }
    RoutingTable routingTable = routingTableBuilder.build();
    List<ShardId> shardIds = routingTable.allShards().map(ShardRouting::shardId).toList();
    // Index name -> the shard ids that actually exist in the routing table; used below
    // to check the response covers exactly those shards.
    Map<String, Set<ShardId>> indexShards = shardIds.stream().collect(Collectors.groupingBy(e -> e.getIndex().getName(), Collectors.toSet()));
    // Random desired assignments for a random subset of shards (shards not in this map
    // are expected to come back with an EMPTY assignment view).
    Map<ShardId, ShardAssignment> shardAssignments = new HashMap<>();
    if (shardIds.size() > 0) {
        for (int i = 0; i < randomInt(8); i++) {
            int total = randomIntBetween(1, 1024);
            Set<String> shardNodeIds = randomUnique(() -> randomFrom(nodeIds), randomInt(8));
            shardAssignments.put(randomFrom(shardIds), new ShardAssignment(shardNodeIds, total, total - shardNodeIds.size(), randomInt(1024)));
        }
    }
    // Stub the allocator and cluster-info service the action reads from.
    when(desiredBalanceShardsAllocator.getDesiredBalance()).thenReturn(new DesiredBalance(randomInt(1024), shardAssignments));
    DesiredBalanceStats desiredBalanceStats = randomDesiredBalanceStats();
    when(desiredBalanceShardsAllocator.getStats()).thenReturn(desiredBalanceStats);
    ClusterInfo clusterInfo = ClusterInfo.EMPTY;
    when(clusterInfoService.getClusterInfo()).thenReturn(clusterInfo);
    var clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadataBuilder.build()).nodes(discoveryNodes.build()).routingTable(routingTable).build();
    // Runs the action under test against the assembled state (helper defined elsewhere in this class).
    final var desiredBalanceResponse = executeAction(clusterState);
    assertThat(desiredBalanceResponse.getStats(), equalTo(desiredBalanceStats));
    assertThat(desiredBalanceResponse.getClusterBalanceStats(), notNullValue());
    assertThat(desiredBalanceResponse.getClusterInfo(), equalTo(clusterInfo));
    assertEquals(indexShards.keySet(), desiredBalanceResponse.getRoutingTable().keySet());
    // Wire round-trip must yield an equal response.
    assertEquals(desiredBalanceResponse, copyWriteable(desiredBalanceResponse, writableRegistry(), DesiredBalanceResponse::from));
    // Expected chunked-serialization count: 3 fixed chunks, plus the cluster-info chunks,
    // plus per index (2 + per shard (3 + number of current shard views)).
    AbstractChunkedSerializingTestCase.assertChunkCount(desiredBalanceResponse, response -> 3 + ClusterInfoTests.getChunkCount(response.getClusterInfo()) + response.getRoutingTable().values().stream().mapToInt(indexEntry -> 2 + indexEntry.values().stream().mapToInt(shardEntry -> 3 + shardEntry.current().size()).sum()).sum());
    // Per-shard verification: each shard view must mirror its routing entry, the
    // metadata forecasts, and the stubbed desired assignment (or EMPTY when none was stubbed).
    for (var e : desiredBalanceResponse.getRoutingTable().entrySet()) {
        String index = e.getKey();
        Map<Integer, DesiredBalanceResponse.DesiredShards> shardsMap = e.getValue();
        assertEquals(indexShards.get(index).stream().map(ShardId::id).collect(Collectors.toSet()), shardsMap.keySet());
        for (var shardDesiredBalance : shardsMap.entrySet()) {
            DesiredBalanceResponse.DesiredShards desiredShard = shardDesiredBalance.getValue();
            int shardId = shardDesiredBalance.getKey();
            IndexMetadata indexMetadata = clusterState.metadata().index(index);
            IndexShardRoutingTable indexShardRoutingTable = clusterState.getRoutingTable().shardRoutingTable(index, shardId);
            for (int idx = 0; idx < indexShardRoutingTable.size(); idx++) {
                ShardRouting shard = indexShardRoutingTable.shard(idx);
                DesiredBalanceResponse.ShardView shardView = desiredShard.current().get(idx);
                assertEquals(shard.state(), shardView.state());
                assertEquals(shard.primary(), shardView.primary());
                assertEquals(shard.currentNodeId(), shardView.node());
                assertEquals(shard.relocatingNodeId(), shardView.relocatingNode());
                assertEquals(shard.index().getName(), shardView.index());
                assertEquals(shard.shardId().id(), shardView.shardId());
                // Forecasts are nullable in the view: absent forecast must surface as null.
                var forecastedWriteLoad = TEST_WRITE_LOAD_FORECASTER.getForecastedWriteLoad(indexMetadata);
                assertEquals(forecastedWriteLoad.isPresent() ? forecastedWriteLoad.getAsDouble() : null, shardView.forecastWriteLoad());
                var forecastedShardSizeInBytes = indexMetadata.getForecastedShardSizeInBytes();
                assertEquals(forecastedShardSizeInBytes.isPresent() ? forecastedShardSizeInBytes.getAsLong() : null, shardView.forecastShardSizeInBytes());
                // "Desired" flags: true only when the (current/relocating) node is in the
                // stubbed assignment's node set; relocating flag is null when not relocating.
                Set<String> desiredNodeIds = Optional.ofNullable(shardAssignments.get(shard.shardId())).map(ShardAssignment::nodeIds).orElse(Set.of());
                assertEquals(shard.currentNodeId() != null && desiredNodeIds.contains(shard.currentNodeId()), shardView.nodeIsDesired());
                assertEquals(shard.relocatingNodeId() != null ? desiredNodeIds.contains(shard.relocatingNodeId()) : null, shardView.relocatingNodeIsDesired());
                assertEquals(indexMetadata.getTierPreference(), shardView.tierPreference());
            }
            final var shardAssignment = shardAssignments.get(indexShardRoutingTable.shardId());
            if (shardAssignment == null) {
                assertSame(desiredShard.desired(), DesiredBalanceResponse.ShardAssignmentView.EMPTY);
            } else {
                assertEquals(shardAssignment.nodeIds(), desiredShard.desired().nodeIds());
                assertEquals(shardAssignment.total(), desiredShard.desired().total());
                assertEquals(shardAssignment.unassigned(), desiredShard.desired().unassigned());
                assertEquals(shardAssignment.ignored(), desiredShard.desired().ignored());
            }
        }
    }
}
224853.5331153elasticsearch
/**
 * Produces a sequence of cluster states simulating the allocation lifecycle of
 * {@code indexName}, in order:
 * <ol>
 *   <li>the original state as given;</li>
 *   <li>all primaries initializing on random nodes;</li>
 *   <li>a random subset of primaries started, their allocation ids recorded in-sync;</li>
 *   <li>if {@code withPrimaryAllocationFailures}: the first primary encountered and every
 *       not-yet-started primary moved to unassigned with {@code ALLOCATION_FAILED} — the
 *       list is then returned early;</li>
 *   <li>otherwise: remaining primaries started (in-sync ids recorded);</li>
 *   <li>replicas initializing, each on a node distinct from already-allocated copies;</li>
 *   <li>a random subset of replicas started;</li>
 *   <li>remaining replicas started — appended only if that last pass changed anything.</li>
 * </ol>
 *
 * @param originalClusterState starting state; must already contain {@code indexName}
 * @param indexName            index whose shards are walked through the lifecycle
 * @param numberOfReplicas     lower bound input for the node count (numNodes &gt; replicas)
 * @param withPrimaryAllocationFailures whether to branch into the primary-failure scenario
 * @return the accumulated list of cluster states, oldest first
 */
private List<ClusterState> generateClusterStates(final ClusterState originalClusterState, final String indexName, final int numberOfReplicas, final boolean withPrimaryAllocationFailures) {
    // Enough random nodes that every copy of a shard can live on a distinct node.
    final Set<String> nodeIds = new HashSet<>();
    final int numNodes = randomIntBetween(numberOfReplicas + 1, 10);
    for (int i = 0; i < numNodes; i++) {
        nodeIds.add(randomAlphaOfLength(8));
    }
    final List<ClusterState> clusterStates = new ArrayList<>();
    // State 1: the unmodified input.
    clusterStates.add(originalClusterState);
    ClusterState clusterState = originalClusterState;
    RoutingTable routingTable = originalClusterState.routingTable();
    IndexRoutingTable indexRoutingTable = routingTable.index(indexName);
    IndexRoutingTable.Builder newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
    // State 2: initialize every primary on a random node; replicas untouched.
    for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
        IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId);
        for (int copy = 0; copy < shardRoutingTable.size(); copy++) {
            ShardRouting shardRouting = shardRoutingTable.shard(copy);
            if (shardRouting.primary()) {
                newIndexRoutingTable.addShard(shardRouting.initialize(randomFrom(nodeIds), null, shardRouting.getExpectedShardSize()));
            } else {
                newIndexRoutingTable.addShard(shardRouting);
            }
        }
    }
    routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
    clusterStates.add(clusterState);
    indexRoutingTable = routingTable.index(indexName);
    newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
    // State 3: start a random subset of primaries; record each started primary's
    // allocation id as the in-sync set for its shard.
    Map<Integer, Set<String>> allocationIds = new HashMap<>();
    for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
        IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId);
        for (int copy = 0; copy < shardRoutingTable.size(); copy++) {
            ShardRouting shardRouting = shardRoutingTable.shard(copy);
            if (shardRouting.primary() && randomBoolean()) {
                final ShardRouting newShardRouting = shardRouting.moveToStarted(UNAVAILABLE_EXPECTED_SHARD_SIZE);
                allocationIds.put(newShardRouting.getId(), Set.of(newShardRouting.allocationId().getId()));
                newIndexRoutingTable.addShard(newShardRouting);
            } else {
                newIndexRoutingTable.addShard(shardRouting);
            }
        }
    }
    routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
    final IndexMetadata.Builder idxMetaBuilder = IndexMetadata.builder(clusterState.metadata().index(indexName));
    allocationIds.forEach(idxMetaBuilder::putInSyncAllocationIds);
    Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).put(idxMetaBuilder);
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metadata(metadataBuilder).build();
    clusterStates.add(clusterState);
    if (withPrimaryAllocationFailures) {
        // Failure branch (final state): fail the first primary encountered (started or not)
        // and every not-yet-started primary, then return early.
        boolean alreadyFailedPrimary = false;
        indexRoutingTable = routingTable.index(indexName);
        newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
        for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
            IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId);
            for (int copy = 0; copy < shardRoutingTable.size(); copy++) {
                ShardRouting shardRouting = shardRoutingTable.shard(copy);
                if (shardRouting.primary() && (shardRouting.started() == false || alreadyFailedPrimary == false)) {
                    newIndexRoutingTable.addShard(shardRouting.moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "unlucky shard")));
                    alreadyFailedPrimary = true;
                } else {
                    newIndexRoutingTable.addShard(shardRouting);
                }
            }
        }
        routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
        clusterStates.add(ClusterState.builder(clusterState).routingTable(routingTable).build());
        return clusterStates;
    }
    indexRoutingTable = routingTable.index(indexName);
    newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
    // State 4: start the remaining (not-yet-started) primaries, again recording in-sync ids.
    allocationIds = new HashMap<>();
    for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
        IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId);
        for (int copy = 0; copy < shardRoutingTable.size(); copy++) {
            ShardRouting shardRouting = shardRoutingTable.shard(copy);
            if (shardRouting.primary() && shardRouting.started() == false) {
                final ShardRouting newShardRouting = shardRouting.moveToStarted(UNAVAILABLE_EXPECTED_SHARD_SIZE);
                allocationIds.put(newShardRouting.getId(), Set.of(newShardRouting.allocationId().getId()));
                newIndexRoutingTable.addShard(newShardRouting);
            } else {
                newIndexRoutingTable.addShard(shardRouting);
            }
        }
    }
    routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
    final IndexMetadata.Builder idxMetaBuilder2 = IndexMetadata.builder(clusterState.metadata().index(indexName));
    allocationIds.forEach(idxMetaBuilder2::putInSyncAllocationIds);
    metadataBuilder = Metadata.builder(clusterState.metadata()).put(idxMetaBuilder2);
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metadata(metadataBuilder).build();
    clusterStates.add(clusterState);
    indexRoutingTable = routingTable.index(indexName);
    newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
    // State 5: initialize each replica on a node that does not already host a copy
    // of the same shard (primary's node plus previously chosen replica nodes).
    for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
        IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId);
        final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
        Set<String> allocatedNodes = new HashSet<>();
        allocatedNodes.add(primaryNodeId);
        for (int copy = 0; copy < shardRoutingTable.size(); copy++) {
            ShardRouting shardRouting = shardRoutingTable.shard(copy);
            if (shardRouting.primary() == false) {
                String replicaNodeId = randomFrom(Sets.difference(nodeIds, allocatedNodes));
                newIndexRoutingTable.addShard(shardRouting.initialize(replicaNodeId, null, shardRouting.getExpectedShardSize()));
                allocatedNodes.add(replicaNodeId);
            } else {
                newIndexRoutingTable.addShard(shardRouting);
            }
        }
    }
    routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
    clusterStates.add(ClusterState.builder(clusterState).routingTable(routingTable).build());
    indexRoutingTable = routingTable.index(indexName);
    newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
    // State 6: start a random subset of replicas.
    for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
        IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId);
        for (int copy = 0; copy < shardRoutingTable.size(); copy++) {
            ShardRouting shardRouting = shardRoutingTable.shard(copy);
            if (shardRouting.primary() == false && randomBoolean()) {
                newIndexRoutingTable.addShard(shardRouting.moveToStarted(UNAVAILABLE_EXPECTED_SHARD_SIZE));
            } else {
                newIndexRoutingTable.addShard(shardRouting);
            }
        }
    }
    routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
    clusterStates.add(ClusterState.builder(clusterState).routingTable(routingTable).build());
    // State 7: start any replicas still not started; appended only when this pass
    // actually changed a replica (otherwise it would duplicate state 6).
    boolean replicaStateChanged = false;
    indexRoutingTable = routingTable.index(indexName);
    newIndexRoutingTable = IndexRoutingTable.builder(indexRoutingTable.getIndex());
    for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
        IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId);
        for (int copy = 0; copy < shardRoutingTable.size(); copy++) {
            ShardRouting shardRouting = shardRoutingTable.shard(copy);
            if (shardRouting.primary() == false && shardRouting.started() == false) {
                newIndexRoutingTable.addShard(shardRouting.moveToStarted(UNAVAILABLE_EXPECTED_SHARD_SIZE));
                replicaStateChanged = true;
            } else {
                newIndexRoutingTable.addShard(shardRouting);
            }
        }
    }
    if (replicaStateChanged) {
        routingTable = RoutingTable.builder(routingTable).add(newIndexRoutingTable).build();
        clusterStates.add(ClusterState.builder(clusterState).routingTable(routingTable).build());
    }
    return clusterStates;
}
223792.821243elasticsearch
/**
 * Tests {@code AutoFollower.generateRequest}: how the follow-index pattern
 * ({@code {{leader_index}}} substitution) is applied to data-stream backing
 * indices versus regular index names, and how the follower data stream name is
 * derived. The 16-argument {@code AutoFollowPattern} construction and the
 * identical one-shard/one-replica {@code IndexMetadata} are factored into the
 * helpers below so each case only shows what varies.
 */
public void testGenerateRequest() {
    {
        // Suffix pattern on a backing index: the suffix is spliced into the data
        // stream portion of the backing index name, and the data stream is renamed.
        AutoFollowPattern pattern = autoFollowPattern("{{leader_index}}_copy");
        Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid");
        IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex(backingIndexMetadata(index), createDataStream(index));
        PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern);
        assertThat(request.getRemoteCluster(), equalTo("remote"));
        assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456"));
        assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456"));
        assertThat(request.getDataStreamName(), equalTo("logs-foo-bar_copy"));
    }
    {
        // Prefix pattern on a backing index.
        AutoFollowPattern pattern = autoFollowPattern("copy_{{leader_index}}");
        Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid");
        IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex(backingIndexMetadata(index), createDataStream(index));
        PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern);
        assertThat(request.getRemoteCluster(), equalTo("remote"));
        assertThat(request.getFollowerIndex(), equalTo(".ds-copy_logs-foo-bar-2022-02-01-123456"));
        assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456"));
        assertThat(request.getDataStreamName(), equalTo("copy_logs-foo-bar"));
    }
    {
        // Plain (non-backing) index name: the pattern applies to the whole name,
        // while the data stream name still comes from the abstraction.
        AutoFollowPattern pattern = autoFollowPattern("{{leader_index}}_copy");
        Index index = new Index("my-backing-index", "uuid");
        IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex(backingIndexMetadata(index), createDataStream(index));
        PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern);
        assertThat(request.getRemoteCluster(), equalTo("remote"));
        assertThat(request.getFollowerIndex(), equalTo("my-backing-index_copy"));
        assertThat(request.getLeaderIndex(), equalTo("my-backing-index"));
        assertThat(request.getDataStreamName(), equalTo("logs-foo-bar_copy"));
    }
    {
        // Backing index without a data stream abstraction: the follower index is
        // still renamed, but no data stream name is set.
        AutoFollowPattern pattern = autoFollowPattern("{{leader_index}}_copy");
        Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid");
        IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex(backingIndexMetadata(index), null);
        PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern);
        assertThat(request.getRemoteCluster(), equalTo("remote"));
        assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar_copy-2022-02-01-123456"));
        assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456"));
        assertThat(request.getDataStreamName(), equalTo(null));
    }
    {
        // No follow-index pattern: the leader name is taken over verbatim and no
        // data stream name is set.
        AutoFollowPattern pattern = autoFollowPattern(null);
        Index index = new Index(".ds-logs-foo-bar-2022-02-01-123456", "uuid");
        IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex(backingIndexMetadata(index), createDataStream(index));
        PutFollowAction.Request request = AutoFollower.generateRequest("remote", index, indexAbstraction, pattern);
        assertThat(request.getRemoteCluster(), equalTo("remote"));
        assertThat(request.getFollowerIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456"));
        assertThat(request.getLeaderIndex(), equalTo(".ds-logs-foo-bar-2022-02-01-123456"));
        assertThat(request.getDataStreamName(), equalTo(null));
    }
    {
        // A name merely containing ".ds-" that does not parse as a backing-index
        // pattern is rejected with a descriptive error.
        AutoFollowPattern pattern = autoFollowPattern("{{leader_index}}_copy");
        Index index = new Index("my-.ds-backing-index", "uuid");
        IndexAbstraction indexAbstraction = new IndexAbstraction.ConcreteIndex(backingIndexMetadata(index), createDataStream(index));
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> AutoFollower.generateRequest("remote", index, indexAbstraction, pattern));
        assertThat(e.getMessage(), containsString("unable to determine follower index name from leader index name " + "[my-.ds-backing-index] and follow index pattern: [{{leader_index}}_copy]" + ", index appears to follow a regular data stream backing pattern, but could not be parsed"));
    }
}

/** Auto-follow pattern for "logs-*" on cluster "remote" with the given follow-index naming pattern; all other options defaulted to null. */
private static AutoFollowPattern autoFollowPattern(String followIndexPattern) {
    return new AutoFollowPattern("remote", List.of("logs-*"), List.of(), followIndexPattern, Settings.EMPTY, true, null, null, null, null, null, null, null, null, null, null);
}

/** One-shard, one-replica index metadata reusing the given index's name and UUID at the current index version. */
private static IndexMetadata backingIndexMetadata(Index index) {
    return IndexMetadata.builder(index.getName())
        .settings(
            Settings.builder()
                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
                .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
                .build()
        )
        .build();
}
223640.1720192elasticsearch
/**
 * Master-node handler for putting a trained model. Synchronous phase: parse the
 * inline definition (unless decompression is deferred), validate it, derive and
 * cross-check the model type / inference target type / minimum transport
 * version, fill in server-assigned config fields, and reject model ids that
 * collide with an existing model alias or deployment. Async phase: a chain of
 * listeners, declared below in REVERSE order of execution —
 * existing-task check -> package resolution (packaged models only) ->
 * model-id-vs-tags check -> tags-vs-model-ids check -> native definition store
 * size check (PyTorch only) -> store the config -> respond (optionally
 * triggering the packaged-model fetch first).
 */
protected void masterOperation(Task task, PutTrainedModelAction.Request request, ClusterState state, ActionListener<Response> finalResponseListener) {
    TrainedModelConfig config = request.getTrainedModelConfig();
    try {
        // Eagerly parse the compressed definition unless the caller asked to defer it.
        if (request.isDeferDefinitionDecompression() == false) {
            config.ensureParsedDefinition(xContentRegistry);
        }
    } catch (IOException ex) {
        finalResponseListener.onFailure(ExceptionsHelper.badRequestException("Failed to parse definition for [{}]", ex, config.getModelId()));
        return;
    }
    boolean hasModelDefinition = config.getModelDefinition() != null;
    if (hasModelDefinition) {
        // Validate the parsed definition itself.
        try {
            config.getModelDefinition().getTrainedModel().validate();
        } catch (ElasticsearchException ex) {
            finalResponseListener.onFailure(ExceptionsHelper.badRequestException("Definition for [{}] has validation failures.", ex, config.getModelId()));
            return;
        }
        // Infer the model type from the definition; it must be known and must agree
        // with any explicitly requested type.
        TrainedModelType trainedModelType = TrainedModelType.typeFromTrainedModel(config.getModelDefinition().getTrainedModel());
        if (trainedModelType == null) {
            finalResponseListener.onFailure(ExceptionsHelper.badRequestException("Unknown trained model definition class [{}]", config.getModelDefinition().getTrainedModel().getName()));
            return;
        }
        if (config.getModelType() == null) {
            // No explicit type: adopt the inferred one.
            config = new TrainedModelConfig.Builder(config).setModelType(trainedModelType).build();
        } else if (trainedModelType != config.getModelType()) {
            finalResponseListener.onFailure(ExceptionsHelper.badRequestException("{} [{}] does not match the model definition type [{}]", TrainedModelConfig.MODEL_TYPE.getPreferredName(), config.getModelType(), trainedModelType));
            return;
        }
        // The inference config must support the definition's target type.
        if (config.getInferenceConfig().isTargetTypeSupported(config.getModelDefinition().getTrainedModel().targetType()) == false) {
            finalResponseListener.onFailure(ExceptionsHelper.badRequestException("Model [{}] inference config type [{}] does not support definition target type [{}]", config.getModelId(), config.getInferenceConfig().getName(), config.getModelDefinition().getTrainedModel().targetType()));
            return;
        }
        // Reject models that older nodes in a mixed-version cluster could not read.
        TransportVersion minCompatibilityVersion = config.getModelDefinition().getTrainedModel().getMinimalCompatibilityVersion();
        if (state.getMinTransportVersion().before(minCompatibilityVersion)) {
            finalResponseListener.onFailure(ExceptionsHelper.badRequestException("Cannot create model [{}] while cluster upgrade is in progress.", config.getModelId()));
            return;
        }
    }
    // Server-assigned fields: version, creation time, creator, license level.
    TrainedModelConfig.Builder trainedModelConfig = new TrainedModelConfig.Builder(config).setVersion(MlConfigVersion.CURRENT).setCreateTime(Instant.now()).setCreatedBy("api_user").setLicenseLevel(License.OperationMode.PLATINUM.description());
    AtomicReference<ModelPackageConfig> modelPackageConfigHolder = new AtomicReference<>();
    if (hasModelDefinition) {
        // Inline definition: record its size and estimated operation count.
        trainedModelConfig.setModelSize(config.getModelDefinition().ramBytesUsed()).setEstimatedOperations(config.getModelDefinition().getTrainedModel().estimatedNumOperations());
    } else {
        // No definition yet: point the config at the default storage location for its type
        // (tree-ensemble when no type was given).
        trainedModelConfig.setLocation(Optional.ofNullable(config.getModelType()).orElse(TrainedModelType.TREE_ENSEMBLE).getDefaultLocation(config.getModelId()));
    }
    // The model id must not clash with an existing model alias...
    if (ModelAliasMetadata.fromState(state).getModelId(trainedModelConfig.getModelId()) != null) {
        finalResponseListener.onFailure(ExceptionsHelper.badRequestException("requested model_id [{}] is the same as an existing model_alias. Model model_aliases and ids must be unique", config.getModelId()));
        return;
    }
    // ...nor with a model that already has a deployment.
    if (TrainedModelAssignmentMetadata.fromState(state).hasDeployment(trainedModelConfig.getModelId())) {
        finalResponseListener.onFailure(ExceptionsHelper.badRequestException("Cannot create model [{}] " + MODEL_ALREADY_EXISTS_ERROR_MESSAGE_FRAGMENT, config.getModelId()));
        return;
    }
    var isPackageModel = config.isPackagedModel();
    // Final step of the chain: store the config, then respond — for packaged models,
    // first trigger the model fetch (NOTE(review): the inner delegateFailureAndWrap
    // chain here is intricate; verify ordering against the helper's contract).
    ActionListener<Void> checkStorageIndexSizeListener = finalResponseListener.<Boolean>delegateFailureAndWrap((delegate, bool) -> {
        TrainedModelConfig configToReturn = trainedModelConfig.clearDefinition().build();
        if (modelPackageConfigHolder.get() != null) {
            triggerModelFetchIfNecessary(configToReturn.getModelId(), modelPackageConfigHolder.get(), request.isWaitForCompletion(), delegate.<TrainedModelConfig>delegateFailureAndWrap((l, cfg) -> l.onResponse(new Response(cfg))).<TrainedModelConfig>delegateFailureAndWrap((l, cfg) -> verifyMlNodesAndModelArchitectures(cfg, client, threadPool, l)).delegateFailureAndWrap((l, downloadTriggered) -> l.onResponse(configToReturn)));
        } else {
            delegate.onResponse(new PutTrainedModelAction.Response(configToReturn));
        }
    }).delegateFailureAndWrap((l, r) -> trainedModelProvider.storeTrainedModel(trainedModelConfig.build(), l, isPackageModel));
    // For PyTorch models, refuse to store when the native definition index has
    // exceeded its size cap; a missing index counts as empty.
    ActionListener<Void> tagsModelIdCheckListener = ActionListener.wrap(r -> {
        if (TrainedModelType.PYTORCH.equals(trainedModelConfig.getModelType())) {
            client.admin().indices().prepareStats(InferenceIndexConstants.nativeDefinitionStore()).clear().setStore(true).execute(ActionListener.wrap(stats -> {
                IndexStats indexStats = stats.getIndices().get(InferenceIndexConstants.nativeDefinitionStore());
                if (indexStats != null && indexStats.getTotal().getStore().sizeInBytes() > MAX_NATIVE_DEFINITION_INDEX_SIZE.getBytes()) {
                    finalResponseListener.onFailure(new ElasticsearchStatusException("Native model store has exceeded the maximum acceptable size of {}, " + "please delete older unused pytorch models", RestStatus.CONFLICT, MAX_NATIVE_DEFINITION_INDEX_SIZE.toString()));
                    return;
                }
                checkStorageIndexSizeListener.onResponse(null);
            }, e -> {
                // Index not created yet: nothing stored, so the size check trivially passes.
                if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
                    checkStorageIndexSizeListener.onResponse(null);
                    return;
                }
                finalResponseListener.onFailure(new ElasticsearchStatusException("Unable to calculate stats for definition storage index [{}], please try again later", RestStatus.SERVICE_UNAVAILABLE, e, InferenceIndexConstants.nativeDefinitionStore()));
            }));
            return;
        }
        checkStorageIndexSizeListener.onResponse(null);
    }, finalResponseListener::onFailure);
    // After the model-id-vs-tags check, verify none of the request's tags clash with existing model ids.
    ActionListener<Void> modelIdTagCheckListener = ActionListener.wrap(r -> checkTagsAgainstModelIds(request.getTrainedModelConfig().getTags(), tagsModelIdCheckListener), finalResponseListener::onFailure);
    // For packaged models: resolve and validate the package config and copy its fields
    // into the builder before the id/tag checks; plain models go straight to the checks.
    ActionListener<Void> handlePackageAndTagsListener = ActionListener.wrap(r -> {
        if (isPackageModel) {
            resolvePackageConfig(trainedModelConfig.getModelId(), ActionListener.wrap(resolvedModelPackageConfig -> {
                try {
                    TrainedModelValidator.validatePackage(trainedModelConfig, resolvedModelPackageConfig, state);
                } catch (ValidationException e) {
                    finalResponseListener.onFailure(e);
                    return;
                }
                modelPackageConfigHolder.set(resolvedModelPackageConfig);
                setTrainedModelConfigFieldsFromPackagedModel(trainedModelConfig, resolvedModelPackageConfig, xContentRegistry);
                checkModelIdAgainstTags(trainedModelConfig.getModelId(), modelIdTagCheckListener);
            }, finalResponseListener::onFailure));
        } else {
            checkModelIdAgainstTags(trainedModelConfig.getModelId(), modelIdTagCheckListener);
        }
    }, finalResponseListener::onFailure);
    // Kick off the chain: first make sure no task for this model id is already running.
    checkForExistingTask(client, trainedModelConfig.getModelId(), request.isWaitForCompletion(), finalResponseListener, handlePackageAndTagsListener, request.ackTimeout());
}
223470.719225elasticsearch
/**
 * Builds and wires together all Watcher components: the notification services
 * (email, Jira, Slack, PagerDuty, webhook), the condition/transform/action/input
 * registries, the bulk processor used to persist watch history and triggered
 * watches, the trigger engines, and the execution and lifecycle services.
 *
 * <p>Returns an empty list when Watcher is disabled. Also assigns the
 * {@code httpClient}, {@code bulkProcessor} and {@code listener} fields as
 * side effects, and registers {@code listener} with the cluster service.
 */
public Collection<Object> createComponents(PluginServices services) {
    // Plugin disabled: contribute no components at all.
    if (enabled == false) {
        return Collections.emptyList();
    }
    Client client = services.client();
    ClusterService clusterService = services.clusterService();
    ThreadPool threadPool = services.threadPool();
    Environment environment = services.environment();
    ScriptService scriptService = services.scriptService();
    NamedXContentRegistry xContentRegistry = services.xContentRegistry();
    FeatureService featureService = services.featureService();
    // A feature only counts as supported once the cluster state has recovered
    // AND the feature check itself passes against that state.
    Predicate<NodeFeature> clusterSupportsFeature = f -> {
        ClusterState state = clusterService.state();
        return state.clusterRecovered() && featureService.clusterHasFeature(state, f);
    };
    // Eager one-time static initialization (presumably mail-related setup for
    // the email action — NOTE(review): confirm what init() does in each class).
    BodyPartSource.init();
    Account.init();
    // Encryption of sensitive data is optional; only build a CryptoService when
    // the setting is enabled. Construction can do I/O, surfaced as unchecked.
    final CryptoService cryptoService;
    try {
        cryptoService = ENCRYPT_SENSITIVE_DATA_SETTING.get(settings) ? new CryptoService(settings) : null;
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    WatcherIndexTemplateRegistry templateRegistry = new WatcherIndexTemplateRegistry(environment.settings(), clusterService, threadPool, client, xContentRegistry);
    templateRegistry.initialize();
    final SSLService sslService = getSslService();
    // Shared HTTP client used by the webhook/Jira/Slack/PagerDuty services and
    // the HTTP input below. Stored on a field (side effect).
    httpClient = new HttpClient(settings, sslService, cryptoService, clusterService);
    EmailService emailService = new EmailService(settings, cryptoService, sslService, clusterService.getClusterSettings());
    JiraService jiraService = new JiraService(settings, httpClient, clusterService.getClusterSettings());
    SlackService slackService = new SlackService(settings, httpClient, clusterService.getClusterSettings());
    PagerDutyService pagerDutyService = new PagerDutyService(settings, httpClient, clusterService.getClusterSettings());
    WebhookService webhookService = new WebhookService(settings, httpClient, clusterService.getClusterSettings());
    // Track every notification service whose settings can be reloaded at runtime.
    reloadableServices.add(emailService);
    reloadableServices.add(jiraService);
    reloadableServices.add(slackService);
    reloadableServices.add(pagerDutyService);
    reloadableServices.add(webhookService);
    TextTemplateEngine templateEngine = new TextTemplateEngine(scriptService);
    // Email attachments can be fetched over HTTP, embedded as data, or produced
    // by the reporting attachment parser.
    Map<String, EmailAttachmentParser<?>> emailAttachmentParsers = new HashMap<>();
    emailAttachmentParsers.put(HttpEmailAttachementParser.TYPE, new HttpEmailAttachementParser(webhookService, templateEngine));
    emailAttachmentParsers.put(DataAttachmentParser.TYPE, new DataAttachmentParser());
    emailAttachmentParsers.put(ReportingAttachmentParser.TYPE, new ReportingAttachmentParser(settings, webhookService, templateEngine, clusterService.getClusterSettings()));
    EmailAttachmentsParser emailAttachmentsParser = new EmailAttachmentsParser(emailAttachmentParsers);
    // Registry of all watch condition types keyed by their TYPE name.
    final ConditionRegistry conditionRegistry = new ConditionRegistry(Map.of(InternalAlwaysCondition.TYPE, (c, id, p) -> InternalAlwaysCondition.parse(id, p), NeverCondition.TYPE, (c, id, p) -> NeverCondition.parse(id, p), ArrayCompareCondition.TYPE, ArrayCompareCondition::parse, CompareCondition.TYPE, CompareCondition::parse, ScriptCondition.TYPE, (c, id, p) -> ScriptCondition.parse(scriptService, id, p)), getClock());
    final TransformRegistry transformRegistry = new TransformRegistry(Map.of(ScriptTransform.TYPE, new ScriptTransformFactory(scriptService), SearchTransform.TYPE, new SearchTransformFactory(settings, client, xContentRegistry, clusterSupportsFeature, scriptService)));
    // One action factory per supported action type.
    final Map<String, ActionFactory> actionFactoryMap = new HashMap<>();
    actionFactoryMap.put(EmailAction.TYPE, new EmailActionFactory(settings, emailService, templateEngine, emailAttachmentsParser));
    actionFactoryMap.put(WebhookAction.TYPE, new WebhookActionFactory(webhookService, templateEngine));
    actionFactoryMap.put(IndexAction.TYPE, new IndexActionFactory(settings, client));
    actionFactoryMap.put(LoggingAction.TYPE, new LoggingActionFactory(templateEngine));
    actionFactoryMap.put(JiraAction.TYPE, new JiraActionFactory(templateEngine, jiraService));
    actionFactoryMap.put(SlackAction.TYPE, new SlackActionFactory(templateEngine, slackService));
    actionFactoryMap.put(PagerDutyAction.TYPE, new PagerDutyActionFactory(templateEngine, pagerDutyService));
    final ActionRegistry registry = new ActionRegistry(actionFactoryMap, conditionRegistry, transformRegistry, getClock(), getLicenseState());
    final Map<String, InputFactory<?, ?, ?>> inputFactories = new HashMap<>();
    inputFactories.put(SearchInput.TYPE, new SearchInputFactory(settings, client, xContentRegistry, clusterSupportsFeature, scriptService));
    inputFactories.put(SimpleInput.TYPE, new SimpleInputFactory());
    inputFactories.put(HttpInput.TYPE, new HttpInputFactory(settings, httpClient, templateEngine));
    inputFactories.put(NoneInput.TYPE, new NoneInputFactory());
    inputFactories.put(TransformInput.TYPE, new TransformInputFactory(transformRegistry));
    final InputRegistry inputRegistry = new InputRegistry(inputFactories);
    // ChainInput needs the registry itself, so it is added to the shared
    // (mutable) factory map only AFTER the InputRegistry has been built from it.
    inputFactories.put(ChainInput.TYPE, new ChainInputFactory(inputRegistry));
    // Bulk processor used for deleting triggered watches and writing watch
    // history; failures are only logged, never propagated. Stored on a field.
    bulkProcessor = BulkProcessor2.builder(new OriginSettingClient(client, WATCHER_ORIGIN)::bulk, new BulkProcessor2.Listener() {

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            if (response.hasFailures()) {
                // Split failures by target index: triggered-watch deletions vs.
                // watch-history writes. Messages are truncated to 2000 chars.
                Map<String, String> triggeredFailures = Arrays.stream(response.getItems()).filter(BulkItemResponse::isFailed).filter(r -> r.getIndex().startsWith(TriggeredWatchStoreField.INDEX_NAME)).collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage));
                Map<String, String> historyFailures = Arrays.stream(response.getItems()).filter(BulkItemResponse::isFailed).filter(r -> r.getIndex().startsWith(DataStream.BACKING_INDEX_PREFIX + HistoryStoreField.DATA_STREAM)).collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage));
                if (triggeredFailures.isEmpty() == false) {
                    String failure = String.join(", ", triggeredFailures.values());
                    logger.error("triggered watches could not be deleted {}, failure [{}]", triggeredFailures.keySet(), Strings.substring(failure, 0, 2000));
                }
                if (historyFailures.isEmpty() == false) {
                    String failure = String.join(", ", historyFailures.values());
                    logger.error("watch history could not be written {}, failure [{}]", historyFailures.keySet(), Strings.substring(failure, 0, 2000));
                }
                // Failed history writes with version > 1 mean a document already
                // existed — logged as a possible second execution of the watch.
                Map<String, String> overwrittenIds = Arrays.stream(response.getItems()).filter(BulkItemResponse::isFailed).filter(r -> r.getIndex().startsWith(DataStream.BACKING_INDEX_PREFIX + HistoryStoreField.DATA_STREAM)).filter(r -> r.getVersion() > 1).collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage));
                if (overwrittenIds.isEmpty() == false) {
                    String failure = String.join(", ", overwrittenIds.values());
                    logger.info("overwrote watch history entries {}, possible second execution of a triggered watch, failure [{}]", overwrittenIds.keySet(), Strings.substring(failure, 0, 2000));
                }
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Exception failure) {
            logger.error("error executing bulk", failure);
        }
    }, client.threadPool()).setFlushInterval(SETTING_BULK_FLUSH_INTERVAL.get(settings)).setBulkActions(SETTING_BULK_ACTIONS.get(settings)).setBulkSize(SETTING_BULK_SIZE.get(settings)).build();
    HistoryStore historyStore = new HistoryStore(bulkProcessor);
    // All supported schedule grammars for the scheduler trigger.
    final Set<Schedule.Parser<?>> scheduleParsers = new HashSet<>();
    scheduleParsers.add(new CronSchedule.Parser());
    scheduleParsers.add(new DailySchedule.Parser());
    scheduleParsers.add(new HourlySchedule.Parser());
    scheduleParsers.add(new IntervalSchedule.Parser());
    scheduleParsers.add(new MonthlySchedule.Parser());
    scheduleParsers.add(new WeeklySchedule.Parser());
    scheduleParsers.add(new YearlySchedule.Parser());
    final ScheduleRegistry scheduleRegistry = new ScheduleRegistry(scheduleParsers);
    // Two engines: manual (on-demand execution) plus the configured one
    // (subclass hook, e.g. schedule-based).
    TriggerEngine<?, ?> manualTriggerEngine = new ManualTriggerEngine();
    final TriggerEngine<?, ?> configuredTriggerEngine = getTriggerEngine(getClock(), scheduleRegistry);
    final Set<TriggerEngine<?, ?>> triggerEngines = new HashSet<>();
    triggerEngines.add(manualTriggerEngine);
    triggerEngines.add(configuredTriggerEngine);
    final TriggerService triggerService = new TriggerService(triggerEngines);
    final TriggeredWatch.Parser triggeredWatchParser = new TriggeredWatch.Parser(triggerService);
    final TriggeredWatchStore triggeredWatchStore = new TriggeredWatchStore(settings, client, triggeredWatchParser, bulkProcessor);
    final WatcherSearchTemplateService watcherSearchTemplateService = new WatcherSearchTemplateService(scriptService, xContentRegistry, clusterSupportsFeature);
    final WatchExecutor watchExecutor = getWatchExecutor(threadPool);
    final WatchParser watchParser = new WatchParser(triggerService, registry, inputRegistry, cryptoService, getClock());
    final ExecutionService executionService = new ExecutionService(settings, historyStore, triggeredWatchStore, watchExecutor, getClock(), watchParser, clusterService, client, threadPool.generic());
    // Route trigger events from the trigger service into the execution service.
    final Consumer<Iterable<TriggerEvent>> triggerEngineListener = getTriggerEngineListener(executionService);
    triggerService.register(triggerEngineListener);
    WatcherService watcherService = new WatcherService(settings, triggerService, triggeredWatchStore, executionService, watchParser, client);
    final WatcherLifeCycleService watcherLifeCycleService = new WatcherLifeCycleService(clusterService, watcherService);
    // Indexing listener keeps trigger state in sync with watch index changes;
    // stored on a field and registered as a cluster state listener.
    listener = new WatcherIndexingListener(watchParser, getClock(), triggerService, watcherLifeCycleService.getState());
    clusterService.addListener(listener);
    logger.info("Watcher initialized components at {}", WatcherDateTimeUtils.dateTimeFormatter.formatMillis(getClock().millis()));
    return Arrays.asList(new ClockHolder(getClock()), registry, inputRegistry, historyStore, triggerService, triggeredWatchParser, watcherLifeCycleService, executionService, triggerEngineListener, watcherService, watchParser, configuredTriggerEngine, triggeredWatchStore, watcherSearchTemplateService, slackService, pagerDutyService);
}
224043.4826176elasticsearch
/**
 * Round-trip test for Slack message template parsing: randomly builds a message
 * template as JSON (every field optionally present), parses it back via
 * {@code SlackMessage.Template.parse}, and verifies each parsed field equals
 * the randomly generated expectation (or is null/absent when it was omitted).
 *
 * Fix: the attachments check previously compared elements pairwise without
 * asserting the parsed array length, so extra parsed attachments went unnoticed.
 */
public void testTemplateParse() throws Exception {
    ToXContent.Params params = ToXContent.EMPTY_PARAMS;
    XContentBuilder jsonBuilder = jsonBuilder();
    jsonBuilder.startObject();
    TextTemplate from = null;
    if (randomBoolean()) {
        from = new TextTemplate(randomAlphaOfLength(200));
        jsonBuilder.field("from", from, params);
    }
    TextTemplate[] to = null;
    if (randomBoolean()) {
        jsonBuilder.startArray("to");
        to = new TextTemplate[randomIntBetween(1, 3)];
        for (int i = 0; i < to.length; i++) {
            to[i] = new TextTemplate(randomAlphaOfLength(10));
            to[i].toXContent(jsonBuilder, params);
        }
        jsonBuilder.endArray();
    }
    TextTemplate text = null;
    if (randomBoolean()) {
        text = new TextTemplate(randomAlphaOfLength(200));
        jsonBuilder.field("text", text, params);
    }
    TextTemplate icon = null;
    if (randomBoolean()) {
        icon = new TextTemplate(randomAlphaOfLength(10));
        jsonBuilder.field("icon", icon);
    }
    // Each attachment is itself built from randomly present sub-fields.
    Attachment.Template[] attachments = null;
    if (randomBoolean()) {
        jsonBuilder.startArray("attachments");
        attachments = new Attachment.Template[randomIntBetween(1, 3)];
        for (int i = 0; i < attachments.length; i++) {
            jsonBuilder.startObject();
            TextTemplate fallback = null;
            if (randomBoolean()) {
                fallback = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("fallback", fallback, params);
            }
            TextTemplate color = null;
            if (randomBoolean()) {
                color = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("color", color, params);
            }
            TextTemplate pretext = null;
            if (randomBoolean()) {
                pretext = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("pretext", pretext, params);
            }
            // author_link / author_icon are only ever emitted together with
            // author_name, mirroring the template's validation rules.
            TextTemplate authorName = null;
            TextTemplate authorLink = null;
            TextTemplate authorIcon = null;
            if (randomBoolean()) {
                authorName = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("author_name", authorName, params);
                if (randomBoolean()) {
                    authorLink = new TextTemplate(randomAlphaOfLength(200));
                    jsonBuilder.field("author_link", authorLink, params);
                }
                if (randomBoolean()) {
                    authorIcon = new TextTemplate(randomAlphaOfLength(200));
                    jsonBuilder.field("author_icon", authorIcon, params);
                }
            }
            // title_link is only emitted together with title.
            TextTemplate title = null;
            TextTemplate titleLink = null;
            if (randomBoolean()) {
                title = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("title", title, params);
                if (randomBoolean()) {
                    titleLink = new TextTemplate(randomAlphaOfLength(200));
                    jsonBuilder.field("title_link", titleLink, params);
                }
            }
            TextTemplate attachmentText = null;
            if (randomBoolean()) {
                attachmentText = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("text", attachmentText, params);
            }
            TextTemplate imageUrl = null;
            if (randomBoolean()) {
                imageUrl = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("image_url", imageUrl, params);
            }
            TextTemplate thumbUrl = null;
            if (randomBoolean()) {
                thumbUrl = new TextTemplate(randomAlphaOfLength(200));
                jsonBuilder.field("thumb_url", thumbUrl, params);
            }
            Field.Template[] fields = null;
            if (randomBoolean()) {
                jsonBuilder.startArray("fields");
                fields = new Field.Template[randomIntBetween(1, 3)];
                for (int j = 0; j < fields.length; j++) {
                    jsonBuilder.startObject();
                    TextTemplate fieldTitle = new TextTemplate(randomAlphaOfLength(50));
                    jsonBuilder.field("title", fieldTitle, params);
                    TextTemplate fieldValue = new TextTemplate(randomAlphaOfLength(50));
                    jsonBuilder.field("value", fieldValue, params);
                    boolean isShort = randomBoolean();
                    jsonBuilder.field("short", isShort);
                    fields[j] = new Field.Template(fieldTitle, fieldValue, isShort);
                    jsonBuilder.endObject();
                }
                jsonBuilder.endArray();
            }
            TextTemplate[] markdownSupportedFields = null;
            if (randomBoolean()) {
                jsonBuilder.startArray("mrkdwn_in");
                jsonBuilder.value("pretext");
                jsonBuilder.endArray();
                markdownSupportedFields = new TextTemplate[] { new TextTemplate("pretext") };
            }
            List<Action.Template> actions = new ArrayList<>();
            if (randomBoolean()) {
                jsonBuilder.startArray("actions");
                jsonBuilder.startObject();
                jsonBuilder.field("type", "button");
                jsonBuilder.field("text", "My text");
                jsonBuilder.field("url", "https://elastic.co");
                String style = randomFrom("primary", "danger");
                jsonBuilder.field("style", style);
                jsonBuilder.field("name", "somebuttonparty");
                jsonBuilder.endObject();
                jsonBuilder.endArray();
                Action.Template action = new Action.Template();
                action.setName(new TextTemplate("somebuttonparty"));
                action.setStyle(new TextTemplate(style));
                action.setText(new TextTemplate("My text"));
                action.setType(new TextTemplate("button"));
                action.setUrl(new TextTemplate("https://elastic.co"));
                actions.add(action);
            }
            jsonBuilder.endObject();
            attachments[i] = new Attachment.Template(fallback, color, pretext, authorName, authorLink, authorIcon, title, titleLink, attachmentText, fields, imageUrl, thumbUrl, markdownSupportedFields, actions);
        }
        jsonBuilder.endArray();
    }
    jsonBuilder.endObject();
    // Parse the generated JSON back into a template and compare field by field.
    XContentParser parser = createParser(jsonBuilder);
    parser.nextToken();
    assertThat(parser.currentToken(), is(XContentParser.Token.START_OBJECT));
    SlackMessage.Template template = SlackMessage.Template.parse(parser);
    assertThat(template, notNullValue());
    assertThat(template.from, is(from));
    if (to == null) {
        assertThat(template.to, nullValue());
    } else {
        assertThat(template.to, arrayContaining(to));
    }
    assertThat(template.icon, is(icon));
    assertThat(template.text, is(text));
    if (attachments == null) {
        assertThat(template.attachments, nullValue());
    } else {
        // Assert length first: element-wise comparison alone would not catch
        // the parser producing extra attachments beyond the expected ones.
        assertThat(template.attachments.length, is(attachments.length));
        for (int i = 0; i < attachments.length; i++) {
            assertThat(template.attachments[i], is(attachments[i]));
        }
    }
}
225489.776198gwt
/**
 * Verifies the multi-select enum message: list formatting of the names
 * ("Nobody", one name, two names, "and one other", "and N others"), plural
 * selection on the message count, and gender selection (his/her/their, with
 * null and UNKNOWN both mapping to "their").
 */
public void testMultiSelectEnum() {
    TestAnnotatedMessages m = GWT.create(TestAnnotatedMessages.class);
    List<String> names = new ArrayList<String>();
    // The message takes the list plus its first two entries (null when absent);
    // recompute those two slots each time the list grows.
    String first = names.size() > 0 ? names.get(0) : null;
    String second = names.size() > 1 ? names.get(1) : null;
    assertEquals("Nobody liked his message", m.multiSelectEnum(names, first, second, 1, Gender.MALE).asString());
    assertEquals("Nobody liked his 2 messages", m.multiSelectEnum(names, first, second, 2, Gender.MALE).asString());
    assertEquals("Nobody liked her message", m.multiSelectEnum(names, first, second, 1, Gender.FEMALE).asString());
    assertEquals("Nobody liked her 3 messages", m.multiSelectEnum(names, first, second, 3, Gender.FEMALE).asString());
    assertEquals("Nobody liked their message", m.multiSelectEnum(names, first, second, 1, null).asString());
    assertEquals("Nobody liked their 4 messages", m.multiSelectEnum(names, first, second, 4, Gender.UNKNOWN).asString());

    names.add("John");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John liked his message", m.multiSelectEnum(names, first, second, 1, Gender.MALE).asString());
    assertEquals("John liked his 2 messages", m.multiSelectEnum(names, first, second, 2, Gender.MALE).asString());
    assertEquals("John liked her message", m.multiSelectEnum(names, first, second, 1, Gender.FEMALE).asString());
    assertEquals("John liked her 3 messages", m.multiSelectEnum(names, first, second, 3, Gender.FEMALE).asString());
    assertEquals("John liked their message", m.multiSelectEnum(names, first, second, 1, Gender.UNKNOWN).asString());
    assertEquals("John liked their 4 messages", m.multiSelectEnum(names, first, second, 4, null).asString());

    names.add("Bob");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John and Bob liked his message", m.multiSelectEnum(names, first, second, 1, Gender.MALE).asString());
    assertEquals("John and Bob liked his 2 messages", m.multiSelectEnum(names, first, second, 2, Gender.MALE).asString());
    assertEquals("John and Bob liked her message", m.multiSelectEnum(names, first, second, 1, Gender.FEMALE).asString());
    assertEquals("John and Bob liked her 3 messages", m.multiSelectEnum(names, first, second, 3, Gender.FEMALE).asString());
    assertEquals("John and Bob liked their message", m.multiSelectEnum(names, first, second, 1, null).asString());
    assertEquals("John and Bob liked their 4 messages", m.multiSelectEnum(names, first, second, 4, Gender.UNKNOWN).asString());

    names.add("Alice");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John, Bob, and one other liked his message", m.multiSelectEnum(names, first, second, 1, Gender.MALE).asString());
    assertEquals("John, Bob, and one other liked his 2 messages", m.multiSelectEnum(names, first, second, 2, Gender.MALE).asString());
    assertEquals("John, Bob, and one other liked her message", m.multiSelectEnum(names, first, second, 1, Gender.FEMALE).asString());
    assertEquals("John, Bob, and one other liked her 3 messages", m.multiSelectEnum(names, first, second, 3, Gender.FEMALE).asString());
    assertEquals("John, Bob, and one other liked their message", m.multiSelectEnum(names, first, second, 1, Gender.UNKNOWN).asString());
    assertEquals("John, Bob, and one other liked their 4 messages", m.multiSelectEnum(names, first, second, 4, null).asString());

    names.add("Carol");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John, Bob, and 2 others liked his message", m.multiSelectEnum(names, first, second, 1, Gender.MALE).asString());
    assertEquals("John, Bob, and 2 others liked his 2 messages", m.multiSelectEnum(names, first, second, 2, Gender.MALE).asString());
    assertEquals("John, Bob, and 2 others liked her message", m.multiSelectEnum(names, first, second, 1, Gender.FEMALE).asString());
    assertEquals("John, Bob, and 2 others liked her 3 messages", m.multiSelectEnum(names, first, second, 3, Gender.FEMALE).asString());
    assertEquals("John, Bob, and 2 others liked their message", m.multiSelectEnum(names, first, second, 1, Gender.UNKNOWN).asString());
    assertEquals("John, Bob, and 2 others liked their 4 messages", m.multiSelectEnum(names, first, second, 4, null).asString());
}
224944.076198gwt
/**
 * Verifies the multi-select string-keyed message: list formatting of the names
 * ("Nobody", one name, two names, "and one other", "and N others"), plural
 * selection on the message count, and gender selection keyed by the strings
 * "MALE", "FEMALE", and "unknown".
 */
public void testMultiSelectString() {
    TestAnnotatedMessages m = GWT.create(TestAnnotatedMessages.class);
    List<String> names = new ArrayList<String>();
    // The message takes the list plus its first two entries (null when absent);
    // recompute those two slots each time the list grows.
    String first = names.size() > 0 ? names.get(0) : null;
    String second = names.size() > 1 ? names.get(1) : null;
    assertEquals("Nobody liked his message", m.multiSelectString(names, first, second, 1, "MALE"));
    assertEquals("Nobody liked his 2 messages", m.multiSelectString(names, first, second, 2, "MALE"));
    assertEquals("Nobody liked her message", m.multiSelectString(names, first, second, 1, "FEMALE"));
    assertEquals("Nobody liked her 3 messages", m.multiSelectString(names, first, second, 3, "FEMALE"));
    assertEquals("Nobody liked their message", m.multiSelectString(names, first, second, 1, "unknown"));
    assertEquals("Nobody liked their 4 messages", m.multiSelectString(names, first, second, 4, "unknown"));

    names.add("John");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John liked his message", m.multiSelectString(names, first, second, 1, "MALE"));
    assertEquals("John liked his 2 messages", m.multiSelectString(names, first, second, 2, "MALE"));
    assertEquals("John liked her message", m.multiSelectString(names, first, second, 1, "FEMALE"));
    assertEquals("John liked her 3 messages", m.multiSelectString(names, first, second, 3, "FEMALE"));
    assertEquals("John liked their message", m.multiSelectString(names, first, second, 1, "unknown"));
    assertEquals("John liked their 4 messages", m.multiSelectString(names, first, second, 4, "unknown"));

    names.add("Bob");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John and Bob liked his message", m.multiSelectString(names, first, second, 1, "MALE"));
    assertEquals("John and Bob liked his 2 messages", m.multiSelectString(names, first, second, 2, "MALE"));
    assertEquals("John and Bob liked her message", m.multiSelectString(names, first, second, 1, "FEMALE"));
    assertEquals("John and Bob liked her 3 messages", m.multiSelectString(names, first, second, 3, "FEMALE"));
    assertEquals("John and Bob liked their message", m.multiSelectString(names, first, second, 1, "unknown"));
    assertEquals("John and Bob liked their 4 messages", m.multiSelectString(names, first, second, 4, "unknown"));

    names.add("Alice");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John, Bob, and one other liked his message", m.multiSelectString(names, first, second, 1, "MALE"));
    assertEquals("John, Bob, and one other liked his 2 messages", m.multiSelectString(names, first, second, 2, "MALE"));
    assertEquals("John, Bob, and one other liked her message", m.multiSelectString(names, first, second, 1, "FEMALE"));
    assertEquals("John, Bob, and one other liked her 3 messages", m.multiSelectString(names, first, second, 3, "FEMALE"));
    assertEquals("John, Bob, and one other liked their message", m.multiSelectString(names, first, second, 1, "unknown"));
    assertEquals("John, Bob, and one other liked their 4 messages", m.multiSelectString(names, first, second, 4, "unknown"));

    names.add("Carol");
    first = names.size() > 0 ? names.get(0) : null;
    second = names.size() > 1 ? names.get(1) : null;
    assertEquals("John, Bob, and 2 others liked his message", m.multiSelectString(names, first, second, 1, "MALE"));
    assertEquals("John, Bob, and 2 others liked his 2 messages", m.multiSelectString(names, first, second, 2, "MALE"));
    assertEquals("John, Bob, and 2 others liked her message", m.multiSelectString(names, first, second, 1, "FEMALE"));
    assertEquals("John, Bob, and 2 others liked her 3 messages", m.multiSelectString(names, first, second, 3, "FEMALE"));
    assertEquals("John, Bob, and 2 others liked their message", m.multiSelectString(names, first, second, 1, "unknown"));
    assertEquals("John, Bob, and 2 others liked their 4 messages", m.multiSelectString(names, first, second, 4, "unknown"));
}
225489.776198gwt
/**
 * Verifies the enum-based multi-select message form across every list size
 * (empty through four names) combined with singular/plural counts and each
 * gender value, including a null gender reference.
 */
public void testMultiSelectEnum() {
    TestAnnotatedMessages m = GWT.create(TestAnnotatedMessages.class);
    // Name appended before each round (null = no addition for the empty round)
    // and the subject phrase expected for the resulting list size.
    String[] added = { null, "John", "Bob", "Alice", "Carol" };
    String[] subjects = { "Nobody", "John", "John and Bob", "John, Bob, and one other", "John, Bob, and 2 others" };
    // Per-assert message count and the pronoun the selected gender must yield.
    int[] counts = { 1, 2, 1, 3, 1, 4 };
    String[] pronouns = { "his", "his", "her", "her", "their", "their" };
    // Gender argument per round and assert. The last two slots deliberately
    // alternate between a null reference and Gender.UNKNOWN, mirroring the
    // original hand-written call sequence exactly.
    Gender[][] genders = {
        { Gender.MALE, Gender.MALE, Gender.FEMALE, Gender.FEMALE, null, Gender.UNKNOWN },
        { Gender.MALE, Gender.MALE, Gender.FEMALE, Gender.FEMALE, Gender.UNKNOWN, null },
        { Gender.MALE, Gender.MALE, Gender.FEMALE, Gender.FEMALE, null, Gender.UNKNOWN },
        { Gender.MALE, Gender.MALE, Gender.FEMALE, Gender.FEMALE, Gender.UNKNOWN, null },
        { Gender.MALE, Gender.MALE, Gender.FEMALE, Gender.FEMALE, Gender.UNKNOWN, null }
    };
    List<String> names = new ArrayList<String>();
    for (int round = 0; round < subjects.length; round++) {
        if (added[round] != null) {
            names.add(added[round]);
        }
        // Same values the original computed inline via ternaries on each call.
        String first = names.size() > 0 ? names.get(0) : null;
        String second = names.size() > 1 ? names.get(1) : null;
        for (int j = 0; j < counts.length; j++) {
            String expected = "test: " + subjects[round] + " liked " + pronouns[j] + " "
                + (counts[j] == 1 ? "message" : counts[j] + " messages");
            assertEquals(expected, m.multiSelectEnum(names, first, second, counts[j], genders[round][j]).asString());
        }
    }
}
224944.076198gwt
/**
 * Verifies the string-keyed multi-select message form across every list size
 * (empty through four names) combined with singular/plural counts and the
 * gender keys "MALE", "FEMALE", and "unknown".
 */
public void testMultiSelectString() {
    TestAnnotatedMessages m = GWT.create(TestAnnotatedMessages.class);
    // Name appended before each round (null = no addition for the empty round)
    // and the subject phrase expected for the resulting list size.
    String[] added = { null, "John", "Bob", "Alice", "Carol" };
    String[] subjects = { "Nobody", "John", "John and Bob", "John, Bob, and one other", "John, Bob, and 2 others" };
    // Per-assert message count, gender key, and the pronoun it must yield;
    // identical for every round in the original call sequence.
    int[] counts = { 1, 2, 1, 3, 1, 4 };
    String[] genders = { "MALE", "MALE", "FEMALE", "FEMALE", "unknown", "unknown" };
    String[] pronouns = { "his", "his", "her", "her", "their", "their" };
    List<String> names = new ArrayList<String>();
    for (int round = 0; round < subjects.length; round++) {
        if (added[round] != null) {
            names.add(added[round]);
        }
        // Same values the original computed inline via ternaries on each call.
        String first = names.size() > 0 ? names.get(0) : null;
        String second = names.size() > 1 ? names.get(1) : null;
        for (int j = 0; j < counts.length; j++) {
            String expected = "test: " + subjects[round] + " liked " + pronouns[j] + " "
                + (counts[j] == 1 ? "message" : counts[j] + " messages");
            assertEquals(expected, m.multiSelectString(names, first, second, counts[j], genders[j]));
        }
    }
}
224280.9717187hadoop
/**
 * Publishes a full application lifecycle (created, launched, updated,
 * state-updated, finished, ACLs-updated) for two applications and verifies
 * the resulting timeline entity: its entity info fields and one event per
 * published lifecycle step.
 *
 * <p>Iteration {@code i == 1} remocks the submission context before the
 * update (new queue, priority 1, node label, AM launch command) and passes a
 * non-null ACL string; iteration {@code i == 2} publishes the same update
 * without remocking and with a null ACL string, so the entity is expected to
 * reflect the values originally stubbed by {@code createRMApp}.
 */
public void testPublishApplicationMetrics() throws Exception {
    long stateUpdateTimeStamp = System.currentTimeMillis();
    for (int i = 1; i <= 2; ++i) {
        ApplicationId appId = ApplicationId.newInstance(0, i);
        RMApp app = createRMApp(appId);
        metricsPublisher.appCreated(app, app.getStartTime());
        metricsPublisher.appLaunched(app, app.getLaunchTime());
        if (i == 1) {
            // Remock queue/priority/labels/AM command so the UPDATED event for
            // the first app carries values that differ from createRMApp's stubs.
            when(app.getQueue()).thenReturn("new test queue");
            ApplicationSubmissionContext asc = mock(ApplicationSubmissionContext.class);
            when(asc.getUnmanagedAM()).thenReturn(false);
            when(asc.getPriority()).thenReturn(Priority.newInstance(1));
            when(asc.getNodeLabelExpression()).thenReturn("high-cpu");
            ContainerLaunchContext containerLaunchContext = mock(ContainerLaunchContext.class);
            when(containerLaunchContext.getCommands()).thenReturn(Collections.singletonList("java -Xmx1024m"));
            when(asc.getAMContainerSpec()).thenReturn(containerLaunchContext);
            when(app.getApplicationSubmissionContext()).thenReturn(asc);
            when(app.getApplicationPriority()).thenReturn(Priority.newInstance(1));
            metricsPublisher.appUpdated(app, 4L);
        } else {
            metricsPublisher.appUpdated(app, 4L);
        }
        metricsPublisher.appStateUpdated(app, YarnApplicationState.RUNNING, stateUpdateTimeStamp);
        metricsPublisher.appFinished(app, RMAppState.FINISHED, app.getFinishTime());
        if (i == 1) {
            // NOTE: "uers1" (sic) — the same literal is asserted below, so the
            // typo is benign; fix both together if ever cleaned up.
            metricsPublisher.appACLsUpdated(app, "uers1,user2", 4L);
        } else {
            metricsPublisher.appACLsUpdated(app, null, 4L);
        }
        TimelineEntity entity = null;
        // Poll until the async publisher has persisted the entity with all six
        // lifecycle events. NOTE(review): no timeout/sleep here — if an event is
        // never written this spins until the surrounding test timeout fires.
        do {
            entity = store.getEntity(appId.toString(), ApplicationMetricsConstants.ENTITY_TYPE, EnumSet.allOf(Field.class));
        } while (entity == null || entity.getEvents().size() < 6);
        Assert.assertEquals(ApplicationMetricsConstants.ENTITY_TYPE, entity.getEntityType());
        Assert.assertEquals(app.getApplicationId().toString(), entity.getEntityId());
        Assert.assertEquals(app.getName(), entity.getOtherInfo().get(ApplicationMetricsConstants.NAME_ENTITY_INFO));
        // Queue was remocked for app 1 after appCreated, so the live mock no
        // longer matches what was published at creation; compare only for app 2.
        if (i != 1) {
            Assert.assertEquals(app.getQueue(), entity.getOtherInfo().get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO));
        }
        Assert.assertEquals(app.getApplicationSubmissionContext().getUnmanagedAM(), entity.getOtherInfo().get(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO));
        // Same reason as the queue check: the submission context of app 1 was
        // replaced after creation, so its priority only matches for app 2.
        if (i != 1) {
            Assert.assertEquals(app.getApplicationSubmissionContext().getPriority().getPriority(), entity.getOtherInfo().get(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO));
        }
        Assert.assertEquals(app.getAmNodeLabelExpression(), entity.getOtherInfo().get(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION));
        Assert.assertEquals(app.getApplicationSubmissionContext().getNodeLabelExpression(), entity.getOtherInfo().get(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION));
        Assert.assertEquals(app.getUser(), entity.getOtherInfo().get(ApplicationMetricsConstants.USER_ENTITY_INFO));
        Assert.assertEquals(app.getApplicationType(), entity.getOtherInfo().get(ApplicationMetricsConstants.TYPE_ENTITY_INFO));
        Assert.assertEquals(app.getSubmitTime(), entity.getOtherInfo().get(ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO));
        Assert.assertTrue(verifyAppTags(app.getApplicationTags(), entity.getOtherInfo()));
        if (i == 1) {
            // Non-null ACLs were published for app 1 (matches the literal above).
            Assert.assertEquals("uers1,user2", entity.getOtherInfo().get(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO));
            Assert.assertEquals(app.getApplicationSubmissionContext().getAMContainerSpec().getCommands(), entity.getOtherInfo().get(ApplicationMetricsConstants.AM_CONTAINER_LAUNCH_COMMAND));
        } else {
            // Null ACLs are published as an empty string; resource metrics are
            // only checked on this branch, round-tripped through toString().
            Assert.assertEquals("", entity.getOtherInfo().get(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO));
            Assert.assertEquals(app.getRMAppMetrics().getMemorySeconds(), Long.parseLong(entity.getOtherInfo().get(ApplicationMetricsConstants.APP_MEM_METRICS).toString()));
            Assert.assertEquals(app.getRMAppMetrics().getVcoreSeconds(), Long.parseLong(entity.getOtherInfo().get(ApplicationMetricsConstants.APP_CPU_METRICS).toString()));
            Assert.assertEquals(app.getRMAppMetrics().getPreemptedMemorySeconds(), Long.parseLong(entity.getOtherInfo().get(ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS).toString()));
            Assert.assertEquals(app.getRMAppMetrics().getPreemptedVcoreSeconds(), Long.parseLong(entity.getOtherInfo().get(ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS).toString()));
        }
        Assert.assertEquals("context", entity.getOtherInfo().get(ApplicationMetricsConstants.YARN_APP_CALLER_CONTEXT));
        // Walk every event once and record which lifecycle event types appeared,
        // asserting the payload of each as it is seen.
        boolean hasCreatedEvent = false;
        boolean hasLaunchedEvent = false;
        boolean hasUpdatedEvent = false;
        boolean hasFinishedEvent = false;
        boolean hasACLsUpdatedEvent = false;
        boolean hasStateUpdateEvent = false;
        for (TimelineEvent event : entity.getEvents()) {
            if (event.getEventType().equals(ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
                hasCreatedEvent = true;
                Assert.assertEquals(app.getStartTime(), event.getTimestamp());
            } else if (event.getEventType().equals(ApplicationMetricsConstants.LAUNCHED_EVENT_TYPE)) {
                hasLaunchedEvent = true;
                Assert.assertEquals(app.getLaunchTime(), event.getTimestamp());
            } else if (event.getEventType().equals(ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
                hasFinishedEvent = true;
                Assert.assertEquals(app.getFinishTime(), event.getTimestamp());
                Assert.assertEquals(app.getDiagnostics().toString(), event.getEventInfo().get(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO));
                Assert.assertEquals(app.getFinalApplicationStatus().toString(), event.getEventInfo().get(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO));
                Assert.assertEquals(YarnApplicationState.FINISHED.toString(), event.getEventInfo().get(ApplicationMetricsConstants.STATE_EVENT_INFO));
            } else if (event.getEventType().equals(ApplicationMetricsConstants.UPDATED_EVENT_TYPE)) {
                hasUpdatedEvent = true;
                Assert.assertEquals(4L, event.getTimestamp());
                // Only app 1 published a changed priority/queue with its update.
                if (1 == i) {
                    Assert.assertEquals(1, event.getEventInfo().get(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO));
                    Assert.assertEquals("new test queue", event.getEventInfo().get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO));
                }
            } else if (event.getEventType().equals(ApplicationMetricsConstants.ACLS_UPDATED_EVENT_TYPE)) {
                hasACLsUpdatedEvent = true;
                Assert.assertEquals(4L, event.getTimestamp());
            } else if (event.getEventType().equals(ApplicationMetricsConstants.STATE_UPDATED_EVENT_TYPE)) {
                hasStateUpdateEvent = true;
                assertThat(event.getTimestamp()).isEqualTo(stateUpdateTimeStamp);
                Assert.assertEquals(YarnApplicationState.RUNNING.toString(), event.getEventInfo().get(ApplicationMetricsConstants.STATE_EVENT_INFO));
            }
        }
        Assert.assertTrue(hasCreatedEvent);
        Assert.assertTrue(hasLaunchedEvent);
        Assert.assertTrue(hasFinishedEvent);
        Assert.assertTrue(hasACLsUpdatedEvent);
        Assert.assertTrue(hasUpdatedEvent);
        Assert.assertTrue(hasStateUpdateEvent);
    }
}
227059.116188hadoop
/**
 * Verifies that a parent queue offers allocation opportunities to child
 * queues ordered by utilization. Queues a/b/c/d are first driven to
 * 1/2/3/4 GB via stubbed allocations, containers are then completed to
 * change their relative usage, and the final mixed round asserts (via
 * Mockito InOrder) that the less-used queue d is offered before b.
 */
public void testSortedQueues() throws Exception {
    setupSortedQueues(csConf);
    queueContext.reinitialize();
    CSQueueStore queues = new CSQueueStore();
    CSQueue root = CapacitySchedulerQueueManager.parseQueue(queueContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
    // Single node cluster: 10 GB memory, 16 cores.
    final int memoryPerNode = 10;
    final int coresPerNode = 16;
    final int numNodes = 1;
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode * GB);
    // Make container release a no-op so completedContainer only affects
    // queue accounting, not the mocked node.
    doNothing().when(node_0).releaseContainer(any(ContainerId.class), anyBoolean());
    final Resource clusterResource = Resources.createResource(numNodes * (memoryPerNode * GB), numNodes * coresPerNode);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    CSQueue a = queues.get(A);
    CSQueue b = queues.get(B);
    CSQueue c = queues.get(C);
    CSQueue d = queues.get(D);
    // Register 1 GB of pending demand on root and each child so the parent
    // considers all of them during assignment.
    queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    c.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    d.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    final String user_0 = "user_0";
    FiCaSchedulerApp app_0 = getMockApplication(0, user_0);
    doReturn(true).when(app_0).containerCompleted(any(RMContainer.class), any(), any(RMContainerEventType.class), any(String.class));
    Priority priority = TestUtils.createMockPriority(1);
    // Minimal RMContext wiring needed to build a real RMContainerImpl below.
    ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
    DrainDispatcher drainDispatcher = new DrainDispatcher();
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
    RMContext rmContext = mock(RMContext.class);
    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
    when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
    when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(app_0.getApplicationId(), 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
    Container container = TestUtils.getMockContainer(containerId, node_0.getNodeID(), Resources.createResource(1 * GB), priority);
    // One reusable 1 GB container handed to every completedContainer call.
    RMContainer rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, node_0.getNodeID(), "user", rmContext);
    // Round 1: only a allocates -> a = 1 GB.
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // Round 2 (x2): only b allocates -> b = 2 GB.
    for (int i = 0; i < 2; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    // Round 3 (x3): only c allocates -> c = 3 GB.
    for (int i = 0; i < 3; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    // Round 4 (x4): only d allocates -> d = 4 GB.
    for (int i = 0; i < 4; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    verifyQueueMetrics(a, 1 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 4 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Complete three of d's 1 GB containers -> d drops from 4 GB to 1 GB.
    for (int i = 0; i < 3; i++) {
        d.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    }
    verifyQueueMetrics(a, 1 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    // Refresh the mock node with the memory still free: 10 - (1+2+3+1) = 3 GB.
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 1 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // a allocates twice more -> a = 3 GB.
    for (int i = 0; i < 2; i++) {
        stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
        stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
        stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
        root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    }
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // One completion from a -> a = 2 GB.
    a.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    // Refresh free memory again: 10 - (2+2+3+1) = 2 GB.
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 2 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // b allocates once -> b = 3 GB.
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 3 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // One completion from b -> b back to 2 GB.
    b.completedContainer(clusterResource, app_0, node_0, rmContainer, null, RMContainerEventType.KILL, null, true);
    verifyQueueMetrics(a, 2 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, (memoryPerNode - 2 - 2 - 3 - 1) * GB);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // a allocates once more -> a = 3 GB.
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 1 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
    // Final round: both b and d are stubbed to allocate 1 GB. With usage
    // b = 2 GB and d = 1 GB, the parent must offer d first, then b.
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    InOrder allocationOrder = inOrder(d, b);
    allocationOrder.verify(d).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), any(ResourceLimits.class), any(SchedulingMode.class));
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), any(ResourceLimits.class), any(SchedulingMode.class));
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 2 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    verifyQueueMetrics(d, 2 * GB, clusterResource);
    LOG.info("status child-queues: " + ((ParentQueue) root).getChildQueuesToPrint());
}
225798.91209hadoop
/**
 * Verifies that queue1's maxAMShare limit (0.5) is enforced while AM
 * containers may be reserved on nodes (RESERVABLE_NODES = 1f), and that
 * queue1's AM resource usage is updated correctly as eight AM attempts
 * start running, wait pending, or finish.
 *
 * NOTE(review): the expected MB figures assume
 * RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE is 10240, per the
 * "Application6's AM requests 10240 MB memory" assertion message below —
 * confirm against the constant's declaration.
 */
public void testQueueMaxAMShareWithContainerReservation() throws Exception {
    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
    // Every node may host reservations, so blocked AMs can reserve space.
    conf.setFloat(FairSchedulerConfiguration.RESERVABLE_NODES, 1f);
    AllocationFileWriter.create().addQueue(new AllocationFileQueue.Builder("queue1").maxAMShare(0.5).build()).writeToFile(ALLOC_FILE);
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    // Two nodes sized at the maximum allocation and one small 5120 MB node.
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, 10), 1, "127.0.0.1");
    RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, 10), 2, "127.0.0.2");
    RMNode node3 = MockNodes.newNodeInfo(1, Resources.createResource(5120, 5), 3, "127.0.0.3");
    NodeAddedSchedulerEvent nodeE1 = new NodeAddedSchedulerEvent(node1);
    NodeUpdateSchedulerEvent updateE1 = new NodeUpdateSchedulerEvent(node1);
    NodeAddedSchedulerEvent nodeE2 = new NodeAddedSchedulerEvent(node2);
    NodeUpdateSchedulerEvent updateE2 = new NodeUpdateSchedulerEvent(node2);
    NodeAddedSchedulerEvent nodeE3 = new NodeAddedSchedulerEvent(node3);
    NodeUpdateSchedulerEvent updateE3 = new NodeUpdateSchedulerEvent(node3);
    scheduler.handle(nodeE1);
    scheduler.handle(nodeE2);
    scheduler.handle(nodeE3);
    scheduler.update();
    FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
    // AM container sizes requested by the eight applications below.
    Resource amResource1 = Resource.newInstance(1024, 1);
    Resource amResource2 = Resource.newInstance(1024, 1);
    Resource amResource3 = Resource.newInstance(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, 1);
    Resource amResource4 = Resource.newInstance(5120, 1);
    Resource amResource5 = Resource.newInstance(1024, 1);
    Resource amResource6 = Resource.newInstance(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, 1);
    Resource amResource7 = Resource.newInstance(1024, 1);
    Resource amResource8 = Resource.newInstance(1024, 1);
    int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
    // App1: 1024 MB AM runs on node1; queue AM usage becomes 1024 MB.
    ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
    createApplicationWithAMResource(attId1, "queue1", "user1", amResource1);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId1);
    FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
    scheduler.update();
    scheduler.handle(updateE1);
    assertEquals("Application1's AM requests 1024 MB memory", 1024, app1.getAMResource().getMemorySize());
    assertEquals("Application1's AM should be running", 1, app1.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 1024 MB memory", 1024, queue1.getAmResourceUsage().getMemorySize());
    // App2: another 1024 MB AM runs on node2; queue AM usage 2048 MB.
    ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
    createApplicationWithAMResource(attId2, "queue1", "user1", amResource2);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId2);
    FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
    scheduler.update();
    scheduler.handle(updateE2);
    assertEquals("Application2's AM requests 1024 MB memory", 1024, app2.getAMResource().getMemorySize());
    assertEquals("Application2's AM should be running", 1, app2.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // App3: asks for a max-allocation-sized AM. It does not start on either
    // large node and is not charged to the queue's AM usage (presumably
    // blocked by the maxAMShare limit — the asserts only pin the
    // observable state).
    ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
    createApplicationWithAMResource(attId3, "queue1", "user1", amResource3);
    createSchedulingRequestExistingApplication(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, 1, amPriority, attId3);
    FSAppAttempt app3 = scheduler.getSchedulerApp(attId3);
    scheduler.update();
    scheduler.handle(updateE1);
    scheduler.handle(updateE2);
    assertEquals("Application3's AM resource shouldn't be updated", 0, app3.getAMResource().getMemorySize());
    assertEquals("Application3's AM should not be running", 0, app3.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // App4: a 5120 MB AM stays pending after a node1 heartbeat...
    ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
    createApplicationWithAMResource(attId4, "queue1", "user1", amResource4);
    createSchedulingRequestExistingApplication(5120, 1, amPriority, attId4);
    FSAppAttempt app4 = scheduler.getSchedulerApp(attId4);
    scheduler.update();
    scheduler.handle(updateE1);
    assertEquals("Application4's AM resource shouldn't be updated", 0, app4.getAMResource().getMemorySize());
    assertEquals("Application4's AM should not be running", 0, app4.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // ...but fits and starts on the small node3; usage grows to 7168 MB.
    scheduler.update();
    scheduler.handle(updateE3);
    assertEquals("Application4's AM requests 5120 MB memory", 5120, app4.getAMResource().getMemorySize());
    assertEquals("Application4's AM should be running", 1, app4.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 7168 MB memory", 7168, queue1.getAmResourceUsage().getMemorySize());
    // Finishing app1 releases its 1024 MB AM charge.
    AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(attId1, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent1);
    assertEquals("Queue1's AM resource usage should be 6144 MB memory", 6144, queue1.getAmResourceUsage().getMemorySize());
    // App5: 1024 MB AM runs on node1; usage back to 7168 MB.
    ApplicationAttemptId attId5 = createAppAttemptId(5, 1);
    createApplicationWithAMResource(attId5, "queue1", "user1", amResource5);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId5);
    FSAppAttempt app5 = scheduler.getSchedulerApp(attId5);
    scheduler.update();
    scheduler.handle(updateE1);
    assertEquals("Application5's AM requests 1024 MB memory", 1024, app5.getAMResource().getMemorySize());
    assertEquals("Application5's AM should be running", 1, app5.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 7168 MB memory", 7168, queue1.getAmResourceUsage().getMemorySize());
    // Removing app3 (never ran) must not change the queue's AM usage.
    AppAttemptRemovedSchedulerEvent appRemovedEvent3 = new AppAttemptRemovedSchedulerEvent(attId3, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent3);
    assertEquals("Queue1's AM resource usage should be 7168 MB memory", 7168, queue1.getAmResourceUsage().getMemorySize());
    // App6: another max-allocation-sized AM; stays pending for now.
    ApplicationAttemptId attId6 = createAppAttemptId(6, 1);
    createApplicationWithAMResource(attId6, "queue1", "user1", amResource6);
    createSchedulingRequestExistingApplication(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, 1, amPriority, attId6);
    FSAppAttempt app6 = scheduler.getSchedulerApp(attId6);
    scheduler.update();
    scheduler.handle(updateE1);
    assertEquals("Application6's AM resource shouldn't be updated", 0, app6.getAMResource().getMemorySize());
    assertEquals("Application6's AM should not be running", 0, app6.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 7168 MB memory", 7168, queue1.getAmResourceUsage().getMemorySize());
    // App7: 1024 MB AM runs on node1; usage grows to 8192 MB.
    ApplicationAttemptId attId7 = createAppAttemptId(7, 1);
    createApplicationWithAMResource(attId7, "queue1", "user1", amResource7);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId7);
    FSAppAttempt app7 = scheduler.getSchedulerApp(attId7);
    scheduler.update();
    scheduler.handle(updateE1);
    assertEquals("Application7's AM requests 1024 MB memory", 1024, app7.getAMResource().getMemorySize());
    assertEquals("Application7's AM should be running", 1, app7.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 8192 MB memory", 8192, queue1.getAmResourceUsage().getMemorySize());
    // Finishing app4 (5120 MB) and app5 (1024 MB) shrinks usage to 2048 MB.
    AppAttemptRemovedSchedulerEvent appRemovedEvent4 = new AppAttemptRemovedSchedulerEvent(attId4, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent4);
    assertEquals("Queue1's AM resource usage should be 3072 MB memory", 3072, queue1.getAmResourceUsage().getMemorySize());
    AppAttemptRemovedSchedulerEvent appRemovedEvent5 = new AppAttemptRemovedSchedulerEvent(attId5, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent5);
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    scheduler.update();
    scheduler.handle(updateE1);
    scheduler.handle(updateE2);
    // App8: a small 1024 MB AM does not start on node1 or node2 even though
    // the queue's AM usage is low — presumably the nodes' capacity is held
    // by app6's reservation; TODO confirm against FSAppAttempt reservation
    // logic.
    ApplicationAttemptId attId8 = createAppAttemptId(8, 1);
    createApplicationWithAMResource(attId8, "queue1", "user1", amResource8);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId8);
    FSAppAttempt app8 = scheduler.getSchedulerApp(attId8);
    scheduler.update();
    scheduler.handle(updateE1);
    assertEquals("Application8's AM resource shouldn't be updated", 0, app8.getAMResource().getMemorySize());
    assertEquals("Application8's AM should not be running", 0, app8.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    scheduler.update();
    scheduler.handle(updateE2);
    assertEquals("Application8's AM resource shouldn't be updated", 0, app8.getAMResource().getMemorySize());
    assertEquals("Application8's AM should not be running", 0, app8.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // Finishing app2 frees node2; app6's big AM can finally start there.
    AppAttemptRemovedSchedulerEvent appRemovedEvent2 = new AppAttemptRemovedSchedulerEvent(attId2, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent2);
    assertEquals("Queue1's AM resource usage should be 1024 MB memory", 1024, queue1.getAmResourceUsage().getMemorySize());
    scheduler.update();
    scheduler.handle(updateE2);
    assertEquals("Application6's AM requests 10240 MB memory", RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, app6.getAMResource().getMemorySize());
    assertEquals("Application6's AM should be running", 1, app6.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 11264 MB memory", 11264, queue1.getAmResourceUsage().getMemorySize());
    // With app6 placed, app8's AM starts on node1; final usage 12288 MB.
    scheduler.update();
    scheduler.handle(updateE1);
    assertEquals("Application8's AM requests 1024 MB memory", 1024, app8.getAMResource().getMemorySize());
    assertEquals("Application8's AM should be running", 1, app8.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 12288 MB memory", 12288, queue1.getAmResourceUsage().getMemorySize());
}
226159.421218hadoop
/**
 * Exercises QueueMetrics aggregation across a two-level hierarchy
 * (root -> root.leaf) with per-user metrics: app lifecycle counters and
 * resource gauges recorded on the leaf queue must be reflected in four
 * sources — the leaf's queue source, the parent's queue source, and the
 * user source at both levels.
 */
public void testTwoLevelWithUserMetrics() {
    AppSchedulingInfo app = mockApp(USER);
    QueueInfo root = new QueueInfo(null, "root", ms, conf, USER);
    QueueInfo leaf = new QueueInfo(root, "root.leaf", ms, conf, USER);
    // Submitting on the leaf bumps APPS_SUBMITTED on all four sources.
    leaf.queueMetrics.submitApp(USER, false);
    AppMetricsChecker appMetricsQueueSourceChecker = AppMetricsChecker.create().counter(APPS_SUBMITTED, 1).checkAgainst(leaf.queueSource, true);
    AppMetricsChecker appMetricsParentQueueSourceChecker = AppMetricsChecker.create().counter(APPS_SUBMITTED, 1).checkAgainst(root.queueSource, true);
    AppMetricsChecker appMetricsUserSourceChecker = AppMetricsChecker.create().counter(APPS_SUBMITTED, 1).checkAgainst(leaf.userSource, true);
    AppMetricsChecker appMetricsParentUserSourceChecker = AppMetricsChecker.create().counter(APPS_SUBMITTED, 1).checkAgainst(root.userSource, true);
    // An attempt submission marks the app pending on all four sources.
    leaf.queueMetrics.submitAppAttempt(USER, false);
    appMetricsQueueSourceChecker = AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker).gaugeInt(APPS_PENDING, 1).checkAgainst(leaf.queueSource, true);
    appMetricsParentQueueSourceChecker = AppMetricsChecker.createFromChecker(appMetricsParentQueueSourceChecker).gaugeInt(APPS_PENDING, 1).checkAgainst(root.queueSource, true);
    appMetricsUserSourceChecker = AppMetricsChecker.createFromChecker(appMetricsUserSourceChecker).gaugeInt(APPS_PENDING, 1).checkAgainst(leaf.userSource, true);
    appMetricsParentUserSourceChecker = AppMetricsChecker.createFromChecker(appMetricsParentUserSourceChecker).gaugeInt(APPS_PENDING, 1).checkAgainst(root.userSource, true);
    // Queues get 100 GB / 100 vcores available; the user only 10 GB / 10.
    root.queueMetrics.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL, Resources.createResource(100 * GB, 100));
    leaf.queueMetrics.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL, Resources.createResource(100 * GB, 100));
    root.queueMetrics.setAvailableResourcesToUser(RMNodeLabelsManager.NO_LABEL, USER, Resources.createResource(10 * GB, 10));
    leaf.queueMetrics.setAvailableResourcesToUser(RMNodeLabelsManager.NO_LABEL, USER, Resources.createResource(10 * GB, 10));
    // 5 pending containers of 3 GB / 3 vcores each => 15 GB / 15 vcores
    // pending on every source.
    leaf.queueMetrics.incrPendingResources(RMNodeLabelsManager.NO_LABEL, USER, 5, Resources.createResource(3 * GB, 3));
    ResourceMetricsChecker resMetricsQueueSourceChecker = ResourceMetricsChecker.createMandatoryResourceChecker().gaugeLong(AVAILABLE_MB, 100 * GB).gaugeInt(AVAILABLE_V_CORES, 100).gaugeLong(PENDING_MB, 15 * GB).gaugeInt(PENDING_V_CORES, 15).gaugeInt(PENDING_CONTAINERS, 5).checkAgainst(leaf.queueSource);
    ResourceMetricsChecker resMetricsParentQueueSourceChecker = ResourceMetricsChecker.createMandatoryResourceChecker().gaugeLong(AVAILABLE_MB, 100 * GB).gaugeInt(AVAILABLE_V_CORES, 100).gaugeLong(PENDING_MB, 15 * GB).gaugeInt(PENDING_V_CORES, 15).gaugeInt(PENDING_CONTAINERS, 5).checkAgainst(root.queueSource);
    ResourceMetricsChecker resMetricsUserSourceChecker = ResourceMetricsChecker.createMandatoryResourceChecker().gaugeLong(AVAILABLE_MB, 10 * GB).gaugeInt(AVAILABLE_V_CORES, 10).gaugeLong(PENDING_MB, 15 * GB).gaugeInt(PENDING_V_CORES, 15).gaugeInt(PENDING_CONTAINERS, 5).checkAgainst(leaf.userSource);
    ResourceMetricsChecker resMetricsParentUserSourceChecker = ResourceMetricsChecker.createMandatoryResourceChecker().gaugeLong(AVAILABLE_MB, 10 * GB).gaugeInt(AVAILABLE_V_CORES, 10).gaugeLong(PENDING_MB, 15 * GB).gaugeInt(PENDING_V_CORES, 15).gaugeInt(PENDING_CONTAINERS, 5).checkAgainst(root.userSource);
    // Running the attempt flips pending -> running; only the leaf's queue
    // and user sources are re-checked here.
    leaf.queueMetrics.runAppAttempt(app.getApplicationId(), USER, false);
    appMetricsQueueSourceChecker = AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker).gaugeInt(APPS_PENDING, 0).gaugeInt(APPS_RUNNING, 1).checkAgainst(leaf.queueSource, true);
    appMetricsUserSourceChecker = AppMetricsChecker.createFromChecker(appMetricsUserSourceChecker).gaugeInt(APPS_PENDING, 0).gaugeInt(APPS_RUNNING, 1).checkAgainst(leaf.userSource, true);
    // Allocate 3 containers (2 GB / 2 vcores each) and reserve one 3 GB / 3
    // vcore container: allocated 6 GB, pending drops to 9 GB / 2 containers,
    // reserved shows 3 GB / 1 container — on all four sources.
    leaf.queueMetrics.allocateResources(RMNodeLabelsManager.NO_LABEL, USER, 3, Resources.createResource(2 * GB, 2), true);
    leaf.queueMetrics.reserveResource(RMNodeLabelsManager.NO_LABEL, USER, Resources.createResource(3 * GB, 3));
    resMetricsQueueSourceChecker = ResourceMetricsChecker.createFromChecker(resMetricsQueueSourceChecker).gaugeLong(ALLOCATED_MB, 6 * GB).gaugeInt(ALLOCATED_V_CORES, 6).gaugeInt(ALLOCATED_CONTAINERS, 3).counter(AGGREGATE_CONTAINERS_ALLOCATED, 3).gaugeLong(PENDING_MB, 9 * GB).gaugeInt(PENDING_V_CORES, 9).gaugeInt(PENDING_CONTAINERS, 2).gaugeLong(RESERVED_MB, 3 * GB).gaugeInt(RESERVED_V_CORES, 3).gaugeInt(RESERVED_CONTAINERS, 1).checkAgainst(leaf.queueSource);
    resMetricsParentQueueSourceChecker = ResourceMetricsChecker.createFromChecker(resMetricsParentQueueSourceChecker).gaugeLong(ALLOCATED_MB, 6 * GB).gaugeInt(ALLOCATED_V_CORES, 6).gaugeInt(ALLOCATED_CONTAINERS, 3).counter(AGGREGATE_CONTAINERS_ALLOCATED, 3).gaugeLong(PENDING_MB, 9 * GB).gaugeInt(PENDING_V_CORES, 9).gaugeInt(PENDING_CONTAINERS, 2).gaugeLong(RESERVED_MB, 3 * GB).gaugeInt(RESERVED_V_CORES, 3).gaugeInt(RESERVED_CONTAINERS, 1).checkAgainst(root.queueSource);
    resMetricsUserSourceChecker = ResourceMetricsChecker.createFromChecker(resMetricsUserSourceChecker).gaugeLong(ALLOCATED_MB, 6 * GB).gaugeInt(ALLOCATED_V_CORES, 6).gaugeInt(ALLOCATED_CONTAINERS, 3).counter(AGGREGATE_CONTAINERS_ALLOCATED, 3).gaugeLong(PENDING_MB, 9 * GB).gaugeInt(PENDING_V_CORES, 9).gaugeInt(PENDING_CONTAINERS, 2).gaugeLong(RESERVED_MB, 3 * GB).gaugeInt(RESERVED_V_CORES, 3).gaugeInt(RESERVED_CONTAINERS, 1).checkAgainst(leaf.userSource);
    resMetricsParentUserSourceChecker = ResourceMetricsChecker.createFromChecker(resMetricsParentUserSourceChecker).gaugeLong(ALLOCATED_MB, 6 * GB).gaugeInt(ALLOCATED_V_CORES, 6).gaugeInt(ALLOCATED_CONTAINERS, 3).counter(AGGREGATE_CONTAINERS_ALLOCATED, 3).gaugeLong(PENDING_MB, 9 * GB).gaugeInt(PENDING_V_CORES, 9).gaugeInt(PENDING_CONTAINERS, 2).gaugeLong(RESERVED_MB, 3 * GB).gaugeInt(RESERVED_V_CORES, 3).gaugeInt(RESERVED_CONTAINERS, 1).checkAgainst(root.userSource);
    // Release one 2 GB container and drop the reservation: allocated falls
    // to 4 GB / 2 containers, all reserved gauges return to zero.
    leaf.queueMetrics.releaseResources(RMNodeLabelsManager.NO_LABEL, USER, 1, Resources.createResource(2 * GB, 2));
    leaf.queueMetrics.unreserveResource(RMNodeLabelsManager.NO_LABEL, USER, Resources.createResource(3 * GB, 3));
    ResourceMetricsChecker.createFromChecker(resMetricsQueueSourceChecker).gaugeLong(ALLOCATED_MB, 4 * GB).gaugeInt(ALLOCATED_V_CORES, 4).gaugeInt(ALLOCATED_CONTAINERS, 2).counter(AGGREGATE_CONTAINERS_RELEASED, 1).gaugeLong(RESERVED_MB, 0).gaugeInt(RESERVED_V_CORES, 0).gaugeInt(RESERVED_CONTAINERS, 0).checkAgainst(leaf.queueSource);
    ResourceMetricsChecker.createFromChecker(resMetricsParentQueueSourceChecker).gaugeLong(ALLOCATED_MB, 4 * GB).gaugeInt(ALLOCATED_V_CORES, 4).gaugeInt(ALLOCATED_CONTAINERS, 2).counter(AGGREGATE_CONTAINERS_RELEASED, 1).gaugeLong(RESERVED_MB, 0).gaugeInt(RESERVED_V_CORES, 0).gaugeInt(RESERVED_CONTAINERS, 0).checkAgainst(root.queueSource);
    ResourceMetricsChecker.createFromChecker(resMetricsUserSourceChecker).gaugeLong(ALLOCATED_MB, 4 * GB).gaugeInt(ALLOCATED_V_CORES, 4).gaugeInt(ALLOCATED_CONTAINERS, 2).counter(AGGREGATE_CONTAINERS_RELEASED, 1).gaugeLong(RESERVED_MB, 0).gaugeInt(RESERVED_V_CORES, 0).gaugeInt(RESERVED_CONTAINERS, 0).checkAgainst(leaf.userSource);
    ResourceMetricsChecker.createFromChecker(resMetricsParentUserSourceChecker).gaugeLong(ALLOCATED_MB, 4 * GB).gaugeInt(ALLOCATED_V_CORES, 4).gaugeInt(ALLOCATED_CONTAINERS, 2).counter(AGGREGATE_CONTAINERS_RELEASED, 1).gaugeLong(RESERVED_MB, 0).gaugeInt(RESERVED_V_CORES, 0).gaugeInt(RESERVED_CONTAINERS, 0).checkAgainst(root.userSource);
    // Finishing the attempt zeroes the running gauge (and the pending gauge
    // on the parent sources, which were not re-checked after runAppAttempt).
    leaf.queueMetrics.finishAppAttempt(app.getApplicationId(), app.isPending(), app.getUser(), false);
    appMetricsQueueSourceChecker = AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker).counter(APPS_SUBMITTED, 1).gaugeInt(APPS_RUNNING, 0).checkAgainst(leaf.queueSource, true);
    appMetricsParentQueueSourceChecker = AppMetricsChecker.createFromChecker(appMetricsParentQueueSourceChecker).counter(APPS_SUBMITTED, 1).gaugeInt(APPS_PENDING, 0).gaugeInt(APPS_RUNNING, 0).checkAgainst(root.queueSource, true);
    appMetricsUserSourceChecker = AppMetricsChecker.createFromChecker(appMetricsUserSourceChecker).counter(APPS_SUBMITTED, 1).gaugeInt(APPS_RUNNING, 0).checkAgainst(leaf.userSource, true);
    appMetricsParentUserSourceChecker = AppMetricsChecker.createFromChecker(appMetricsParentUserSourceChecker).counter(APPS_SUBMITTED, 1).gaugeInt(APPS_PENDING, 0).gaugeInt(APPS_RUNNING, 0).checkAgainst(root.userSource, true);
    // Finishing the app increments APPS_COMPLETED on all four sources.
    leaf.queueMetrics.finishApp(USER, RMAppState.FINISHED, false);
    AppMetricsChecker.createFromChecker(appMetricsQueueSourceChecker).counter(APPS_COMPLETED, 1).checkAgainst(leaf.queueSource, true);
    AppMetricsChecker.createFromChecker(appMetricsParentQueueSourceChecker).counter(APPS_COMPLETED, 1).checkAgainst(root.queueSource, true);
    AppMetricsChecker.createFromChecker(appMetricsUserSourceChecker).counter(APPS_COMPLETED, 1).checkAgainst(leaf.userSource, true);
    AppMetricsChecker.createFromChecker(appMetricsParentUserSourceChecker).counter(APPS_COMPLETED, 1).checkAgainst(root.userSource, true);
}
225315.4315180hadoop
/**
 * Verifies reading YARN application entities filtered by their relates-to
 * relations, covering OR lists, AND (default) lists, NOT_EQUAL filters,
 * filters on non-existent keys/values, and nested filter lists.
 *
 * Every query uses the same reader context (see {@link #appContext()});
 * entity-id and relation-count verification is factored into
 * {@link #verifyRelatesTo}.
 */
public void testReadAppsRelatesTo() throws Exception {
    // OR of two EQUAL filters: an entity matching either relation is kept.
    TimelineFilterList rt = new TimelineFilterList(Operator.OR);
    rt.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7"))));
    rt.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    // Field.ALL is requested, so relates-to data is returned (3 relations).
    Set<TimelineEntity> entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(2, entities.size());
    verifyRelatesTo(entities, 3, "application_1111111111_2222", "application_1111111111_4444");
    // AND (default operator) of an EQUAL and a NOT_EQUAL filter. No fields
    // are requested, so no relates-to data comes back (count 0).
    TimelineFilterList rt1 = new TimelineFilterList();
    rt1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    rt1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto3"))));
    entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt1).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    verifyRelatesTo(entities, 0, "application_1111111111_3333");
    // Same OR filter as the first query but without requesting fields: the
    // same entities match, but their relates-to data is omitted.
    TimelineFilterList rt2 = new TimelineFilterList(Operator.OR);
    rt2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7"))));
    rt2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt2).build(), new TimelineDataToRetrieve());
    assertEquals(2, entities.size());
    verifyRelatesTo(entities, 0, "application_1111111111_2222", "application_1111111111_4444");
    // Single EQUAL filter requiring both values under one key.
    TimelineFilterList rt3 = new TimelineFilterList();
    rt3.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1", "relatesto3"))));
    entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt3).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    verifyRelatesTo(entities, 0, "application_1111111111_2222");
    // ANDing with a relation under a non-existent key matches nothing.
    TimelineFilterList rt4 = new TimelineFilterList();
    rt4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    rt4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_container", new HashSet<Object>(Arrays.asList("relatesto5"))));
    entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt4).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // Requiring a value no entity carries ("relatedto1") matches nothing.
    TimelineFilterList rt5 = new TimelineFilterList();
    rt5.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatedto1", "relatesto8"))));
    entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt5).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // OR of two AND lists: list1 cannot match (dummy key), list2 does.
    TimelineFilterList list1 = new TimelineFilterList();
    list1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7"))));
    list1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_container", new HashSet<Object>(Arrays.asList("relatesto4"))));
    TimelineFilterList list2 = new TimelineFilterList();
    list2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    TimelineFilterList rt6 = new TimelineFilterList(Operator.OR, list1, list2);
    entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt6).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    verifyRelatesTo(entities, 0, "application_1111111111_2222");
    // (list3 OR list4) AND NOT_EQUAL relatesto3: only app 3333 survives.
    TimelineFilterList list3 = new TimelineFilterList();
    list3.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    list3.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    TimelineFilterList list4 = new TimelineFilterList();
    list4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    list4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto2"))));
    TimelineFilterList combinedList = new TimelineFilterList(Operator.OR, list3, list4);
    TimelineFilterList rt7 = new TimelineFilterList(Operator.AND, combinedList, new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto3"))));
    entities = reader.getEntities(appContext(), new TimelineEntityFilters.Builder().relatesTo(rt7).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    verifyRelatesTo(entities, 0, "application_1111111111_3333");
}

/**
 * Builds the reader context shared by every query in this test:
 * cluster1/user1/some_flow_name, run id 1002345678919, entity type
 * YARN_APPLICATION.
 */
private static TimelineReaderContext appContext() {
    return new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null);
}

/**
 * Asserts that every returned entity id is one of {@code expectedIds} and
 * that the total number of relates-to relations across the result set
 * equals {@code expectedRelatesToCnt}.
 */
private static void verifyRelatesTo(Set<TimelineEntity> entities, int expectedRelatesToCnt, String... expectedIds) {
    Set<String> allowedIds = new HashSet<>(Arrays.asList(expectedIds));
    int relatesToCnt = 0;
    for (TimelineEntity timelineEntity : entities) {
        relatesToCnt += timelineEntity.getRelatesToEntities().size();
        if (!allowedIds.contains(timelineEntity.getId())) {
            Assert.fail("Entity id should have been one of " + allowedIds + " but was " + timelineEntity.getId());
        }
    }
    assertEquals(expectedRelatesToCnt, relatesToCnt);
}
226010.779183kafka
 void testDescribeTopicPartitionsRequest() {
    Authorizer authorizer = mock(Authorizer.class);
    String unauthorizedTopic = "unauthorized-topic";
    String authorizedTopic = "authorized-topic";
    String authorizedNonExistTopic = "authorized-non-exist";
    Action expectedActions1 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, unauthorizedTopic, PatternType.LITERAL), 1, true, true);
    Action expectedActions2 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true);
    Action expectedActions3 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedNonExistTopic, PatternType.LITERAL), 1, true, true);
    when(authorizer.authorize(any(RequestContext.class), argThat(t -> t.contains(expectedActions1) || t.contains(expectedActions2) || t.contains(expectedActions3)))).thenAnswer(invocation -> {
        List<Action> actions = invocation.getArgument(1);
        return actions.stream().map(action -> {
            if (action.resourcePattern().name().startsWith("authorized"))
                return AuthorizationResult.ALLOWED;
            else
                return AuthorizationResult.DENIED;
        }).collect(Collectors.toList());
    });
    Uuid authorizedTopicId = Uuid.randomUuid();
    Uuid unauthorizedTopicId = Uuid.randomUuid();
    Map<String, Uuid> topicIds = new HashMap<>();
    topicIds.put(authorizedTopic, authorizedTopicId);
    topicIds.put(unauthorizedTopic, unauthorizedTopicId);
    BrokerEndpointCollection collection = new BrokerEndpointCollection();
    collection.add(new BrokerEndpoint().setName(broker.endpoints().get(0).listener()).setHost(broker.endpoints().get(0).host()).setPort(broker.endpoints().get(0).port()).setSecurityProtocol(broker.endpoints().get(0).securityProtocol()));
    List<ApiMessage> records = Arrays.asList(new RegisterBrokerRecord().setBrokerId(broker.id()).setBrokerEpoch(0).setIncarnationId(Uuid.randomUuid()).setEndPoints(collection).setRack(broker.rack()).setFenced(false), new TopicRecord().setName(authorizedTopic).setTopicId(topicIds.get(authorizedTopic)), new TopicRecord().setName(unauthorizedTopic).setTopicId(topicIds.get(unauthorizedTopic)), new PartitionRecord().setTopicId(authorizedTopicId).setPartitionId(1).setReplicas(Arrays.asList(0, 1, 2)).setLeader(0).setIsr(Arrays.asList(0)).setEligibleLeaderReplicas(Arrays.asList(1)).setLastKnownElr(Arrays.asList(2)).setLeaderEpoch(0).setPartitionEpoch(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord().setTopicId(authorizedTopicId).setPartitionId(0).setReplicas(Arrays.asList(0, 1, 2)).setLeader(0).setIsr(Arrays.asList(0)).setEligibleLeaderReplicas(Arrays.asList(1)).setLastKnownElr(Arrays.asList(2)).setLeaderEpoch(0).setPartitionEpoch(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord().setTopicId(unauthorizedTopicId).setPartitionId(0).setReplicas(Arrays.asList(0, 1, 3)).setLeader(0).setIsr(Arrays.asList(0)).setEligibleLeaderReplicas(Arrays.asList(1)).setLastKnownElr(Arrays.asList(3)).setLeaderEpoch(0).setPartitionEpoch(2).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()));
    KRaftMetadataCache metadataCache = new KRaftMetadataCache(0);
    updateKraftMetadataCache(metadataCache, records);
    DescribeTopicPartitionsRequestHandler handler = new DescribeTopicPartitionsRequestHandler(metadataCache, new AuthHelper(scala.Option.apply(authorizer)), createKafkaDefaultConfig());
    DescribeTopicPartitionsRequest describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(unauthorizedTopic))));
    RequestChannel.Request request;
    try {
        request = buildRequest(describeTopicPartitionsRequest, plaintextListener);
    } catch (Exception e) {
        fail(e.getMessage());
        return;
    }
    DescribeTopicPartitionsResponseData response = handler.handleDescribeTopicPartitionsRequest(request);
    List<DescribeTopicPartitionsResponseTopic> topics = response.topics().valuesList();
    assertEquals(2, topics.size());
    DescribeTopicPartitionsResponseTopic topicToCheck = topics.get(0);
    assertEquals(authorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic, topicToCheck.name());
    assertEquals(2, topicToCheck.partitions().size());
    topicToCheck = topics.get(1);
    assertNotEquals(unauthorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), topicToCheck.errorCode());
    assertEquals(unauthorizedTopic, topicToCheck.name());
    describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(unauthorizedTopic))).setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName(authorizedTopic).setPartitionIndex(1)));
    try {
        request = buildRequest(describeTopicPartitionsRequest, plaintextListener);
    } catch (Exception e) {
        fail(e.getMessage());
        return;
    }
    response = handler.handleDescribeTopicPartitionsRequest(request);
    topics = response.topics().valuesList();
    assertEquals(2, topics.size());
    topicToCheck = topics.get(0);
    assertEquals(authorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic, topicToCheck.name());
    assertEquals(1, topicToCheck.partitions().size());
    topicToCheck = topics.get(1);
    assertNotEquals(unauthorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), topicToCheck.errorCode());
    assertEquals(unauthorizedTopic, topicToCheck.name());
    describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData());
    try {
        request = buildRequest(describeTopicPartitionsRequest, plaintextListener);
    } catch (Exception e) {
        fail(e.getMessage());
        return;
    }
    response = handler.handleDescribeTopicPartitionsRequest(request);
    topics = response.topics().valuesList();
    assertEquals(1, topics.size());
    topicToCheck = topics.get(0);
    assertEquals(authorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic, topicToCheck.name());
    assertEquals(2, topicToCheck.partitions().size());
    describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName(authorizedTopic).setPartitionIndex(1)));
    try {
        request = buildRequest(describeTopicPartitionsRequest, plaintextListener);
    } catch (Exception e) {
        fail(e.getMessage());
        return;
    }
    response = handler.handleDescribeTopicPartitionsRequest(request);
    topics = response.topics().valuesList();
    assertEquals(1, topics.size());
    topicToCheck = topics.get(0);
    assertEquals(authorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic, topicToCheck.name());
    assertEquals(1, topicToCheck.partitions().size());
    describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setResponsePartitionLimit(1));
    try {
        request = buildRequest(describeTopicPartitionsRequest, plaintextListener);
    } catch (Exception e) {
        fail(e.getMessage());
        return;
    }
    response = handler.handleDescribeTopicPartitionsRequest(request);
    topics = response.topics().valuesList();
    assertEquals(1, topics.size());
    topicToCheck = topics.get(0);
    assertEquals(authorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic, topicToCheck.name());
    assertEquals(1, topicToCheck.partitions().size());
    assertEquals(authorizedTopic, response.nextCursor().topicName());
    assertEquals(1, response.nextCursor().partitionIndex());
}
223141.5227177kafka
/**
 * Emits size-calculation code for a single variable-length field (string,
 * array, bytes, records, or struct) into the generated message's
 * {@code addSize} method, accumulating into the generated {@code _size}
 * accumulator.
 *
 * <p>The generated code differs by encoding: flexible versions use compact
 * (unsigned-varint-prefixed) lengths, non-flexible versions use fixed-width
 * prefixes (2 bytes for strings, 4 for arrays/bytes/records, 1 for a nullable
 * struct marker). Tagged fields additionally account for the tag varint and a
 * varint holding the field's total serialized size, and bump
 * {@code _numTaggedFields}.
 *
 * @param field            the field to generate size code for
 * @param possibleVersions the message versions this code path may run under
 * @param tagged           whether the field is serialized as a tagged field;
 *                         tagged fields must only appear in flexible versions
 * @throws RuntimeException if a tagged field could be present in a
 *                          non-flexible version, or the field type is
 *                          unsupported (e.g. array of arrays)
 */
private void generateVariableLengthFieldSize(FieldSpec field, Versions possibleVersions, boolean tagged) {
    IsNullConditional.forField(field).alwaysEmitBlockScope(true).possibleVersions(possibleVersions).nullableVersions(field.nullableVersions()).ifNull(() -> {
        // A tagged field whose default is null is simply omitted when null,
        // contributing no bytes; every other null field still needs a marker.
        if (!tagged || !field.defaultString().equals("null")) {
            VersionConditional.forVersions(fieldFlexibleVersions(field), possibleVersions).ifMember(__ -> {
                if (tagged) {
                    // Tagged-field header: the tag varint plus the varint
                    // encoding of the length of the (one-byte) null marker.
                    buffer.printf("_numTaggedFields++;%n");
                    buffer.printf("_size.addBytes(%d);%n", MessageGenerator.sizeOfUnsignedVarint(field.tag().get()));
                    buffer.printf("_size.addBytes(%d);%n", MessageGenerator.sizeOfUnsignedVarint(MessageGenerator.sizeOfUnsignedVarint(0)));
                }
                // Compact encoding writes null as a single varint zero.
                buffer.printf("_size.addBytes(%d);%n", MessageGenerator.sizeOfUnsignedVarint(0));
            }).ifNotMember(__ -> {
                if (tagged) {
                    throw new RuntimeException("Tagged field " + field.name() + " should not be present in non-flexible versions.");
                }
                // Non-flexible null markers: -1 length as short (string),
                // -1 byte (nullable struct), or -1 int (everything else).
                if (field.type().isString()) {
                    buffer.printf("_size.addBytes(2);%n");
                } else if (field.type().isStruct()) {
                    buffer.printf("_size.addBytes(1);%n");
                } else {
                    buffer.printf("_size.addBytes(4);%n");
                }
            }).generate(buffer);
        }
    }).ifShouldNotBeNull(() -> {
        if (tagged) {
            // A tagged field equal to its (non-null) default is omitted; wrap
            // the size accounting in a generated non-default check.
            if (!field.defaultString().equals("null")) {
                field.generateNonDefaultValueCheck(headerGenerator, structRegistry, buffer, "this.", Versions.NONE);
                buffer.incrementIndent();
            }
            buffer.printf("_numTaggedFields++;%n");
            buffer.printf("_size.addBytes(%d);%n", MessageGenerator.sizeOfUnsignedVarint(field.tag().get()));
        }
        if (field.type().isString()) {
            // Strings are measured via their cached UTF-8 byte form.
            generateStringToBytes(field.camelCaseName());
            VersionConditional.forVersions(fieldFlexibleVersions(field), possibleVersions).ifMember(__ -> {
                headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                if (tagged) {
                    // Tagged: bytes + compact length prefix + varint of the
                    // field's total (prefix + bytes) serialized size.
                    buffer.printf("int _stringPrefixSize = " + "ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);%n");
                    buffer.printf("_size.addBytes(_stringBytes.length + _stringPrefixSize + " + "ByteUtils.sizeOfUnsignedVarint(_stringPrefixSize + _stringBytes.length));%n");
                } else {
                    // Compact length prefix encodes length + 1.
                    buffer.printf("_size.addBytes(_stringBytes.length + " + "ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));%n");
                }
            }).ifNotMember(__ -> {
                if (tagged) {
                    throw new RuntimeException("Tagged field " + field.name() + " should not be present in non-flexible versions.");
                }
                // Non-flexible strings use a 2-byte length prefix.
                buffer.printf("_size.addBytes(_stringBytes.length + 2);%n");
            }).generate(buffer);
        } else if (field.type().isArray()) {
            if (tagged) {
                // Snapshot the running total so the array's serialized size
                // can be computed afterwards for the tagged-length varint.
                buffer.printf("int _sizeBeforeArray = _size.totalSize();%n");
            }
            VersionConditional.forVersions(fieldFlexibleVersions(field), possibleVersions).ifMember(__ -> {
                headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                buffer.printf("_size.addBytes(ByteUtils.sizeOfUnsignedVarint(%s.size() + 1));%n", field.camelCaseName());
            }).ifNotMember(__ -> {
                buffer.printf("_size.addBytes(4);%n");
            }).generate(buffer);
            FieldType.ArrayType arrayType = (FieldType.ArrayType) field.type();
            FieldType elementType = arrayType.elementType();
            if (elementType.fixedLength().isPresent()) {
                // Fixed-size elements: one multiplication instead of a loop.
                buffer.printf("_size.addBytes(%s.size() * %d);%n", field.camelCaseName(), elementType.fixedLength().get());
            } else if (elementType instanceof FieldType.ArrayType) {
                throw new RuntimeException("Arrays of arrays are not supported " + "(use a struct).");
            } else {
                buffer.printf("for (%s %sElement : %s) {%n", elementType.getBoxedJavaType(headerGenerator), field.camelCaseName(), field.camelCaseName());
                buffer.incrementIndent();
                generateVariableLengthArrayElementSize(fieldFlexibleVersions(field), String.format("%sElement", field.camelCaseName()), elementType, possibleVersions);
                buffer.decrementIndent();
                buffer.printf("}%n");
            }
            if (tagged) {
                headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                buffer.printf("int _arraySize = _size.totalSize() - _sizeBeforeArray;%n");
                // Cache the array size so serialization doesn't re-measure it.
                buffer.printf("_cache.setArraySizeInBytes(%s, _arraySize);%n", field.camelCaseName());
                buffer.printf("_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_arraySize));%n");
            }
        } else if (field.type().isBytes()) {
            if (tagged) {
                buffer.printf("int _sizeBeforeBytes = _size.totalSize();%n");
            }
            if (field.zeroCopy()) {
                // Zero-copy bytes live in a ByteBuffer and are tracked
                // separately from the regular byte count.
                buffer.printf("_size.addZeroCopyBytes(%s.remaining());%n", field.camelCaseName());
            } else {
                buffer.printf("_size.addBytes(%s.length);%n", field.camelCaseName());
            }
            VersionConditional.forVersions(fieldFlexibleVersions(field), possibleVersions).ifMember(__ -> {
                headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                if (field.zeroCopy()) {
                    buffer.printf("_size.addBytes(" + "ByteUtils.sizeOfUnsignedVarint(%s.remaining() + 1));%n", field.camelCaseName());
                } else {
                    buffer.printf("_size.addBytes(ByteUtils.sizeOfUnsignedVarint(%s.length + 1));%n", field.camelCaseName());
                }
            }).ifNotMember(__ -> {
                buffer.printf("_size.addBytes(4);%n");
            }).generate(buffer);
            if (tagged) {
                headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                buffer.printf("int _bytesSize = _size.totalSize() - _sizeBeforeBytes;%n");
                buffer.printf("_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_bytesSize));%n");
            }
        } else if (field.type().isRecords()) {
            buffer.printf("_size.addZeroCopyBytes(%s.sizeInBytes());%n", field.camelCaseName());
            VersionConditional.forVersions(fieldFlexibleVersions(field), possibleVersions).ifMember(__ -> {
                headerGenerator.addImport(MessageGenerator.BYTE_UTILS_CLASS);
                buffer.printf("_size.addBytes(" + "ByteUtils.sizeOfUnsignedVarint(%s.sizeInBytes() + 1));%n", field.camelCaseName());
            }).ifNotMember(__ -> {
                buffer.printf("_size.addBytes(4);%n");
            }).generate(buffer);
        } else if (field.type().isStruct()) {
            // Nullable structs carry a one-byte null marker in versions where
            // the field is nullable.
            VersionConditional.forVersions(field.nullableVersions(), possibleVersions).ifMember(__ -> {
                buffer.printf("_size.addBytes(1);%n");
            }).generate(buffer);
            if (tagged) {
                // Note: these format strings take no arguments; the stray
                // camelCaseName() arguments previously passed here were
                // ignored by printf and have been removed.
                buffer.printf("int _sizeBeforeStruct = _size.totalSize();%n");
                buffer.printf("this.%s.addSize(_size, _cache, _version);%n", field.camelCaseName());
                buffer.printf("int _structSize = _size.totalSize() - _sizeBeforeStruct;%n");
                buffer.printf("_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_structSize));%n");
            } else {
                buffer.printf("this.%s.addSize(_size, _cache, _version);%n", field.camelCaseName());
            }
        } else {
            throw new RuntimeException("unhandled type " + field.type());
        }
        // Close the non-default-value check opened above for tagged fields
        // with a non-null default.
        if (tagged && !field.defaultString().equals("null")) {
            buffer.decrementIndent();
            buffer.printf("}%n");
        }
    }).generate(buffer);
}
225830.761225kafka
/**
 * Verifies online upgrade of a classic group caught in COMPLETING_REBALANCE:
 * when a new member sends a ConsumerGroupHeartbeat, the classic group metadata
 * is tombstoned, the two existing classic members are converted to consumer
 * group members at epoch 1, the new member bumps the group epoch to 2 with a
 * fresh target assignment, and the pending classic SyncGroup request completes
 * with REBALANCE_IN_PROGRESS. Finally, a rollback restores the classic group.
 *
 * <p>Maps are built explicitly rather than via double-brace initialization,
 * which creates an anonymous inner subclass per site and captures the
 * enclosing test instance. Map contents (and thus record equality) are
 * unchanged.
 */
public void testConsumerGroupHeartbeatWithCompletingRebalanceClassicGroup() throws Exception {
    String groupId = "group-id";
    String memberId1 = "member-id-1";
    String memberId2 = "member-id-2";
    String memberId3 = "member-id-3";
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Target assignment the mock assignor will hand back once member 3 joins.
    Map<String, MemberAssignment> targetAssignment = new HashMap<>();
    targetAssignment.put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0))));
    targetAssignment.put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(barTopicId, 0))));
    targetAssignment.put(memberId3, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 1))));
    assignor.prepareGroupAssignment(new GroupAssignment(targetAssignment));
    MetadataImage metadataImage = new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 2).addTopic(barTopicId, barTopicName, 1).addRacks().build();
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE).withAssignors(Collections.singletonList(assignor)).withMetadataImage(metadataImage).build();
    // Classic protocol metadata: member 1 owns foo-0/foo-1, member 2 owns bar-0.
    JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols1 = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1);
    protocols1.add(new JoinGroupRequestData.JoinGroupRequestProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)))))));
    JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols2 = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1);
    protocols2.add(new JoinGroupRequestData.JoinGroupRequestProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(barTopicName, 0)))))));
    Map<String, byte[]> assignments = new HashMap<>();
    assignments.put(memberId1, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1))))));
    assignments.put(memberId2, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(barTopicName, 0))))));
    // Seed a stable classic group with the two members and persist it.
    ClassicGroup group = context.createClassicGroup(groupId);
    group.setProtocolName(Optional.ofNullable("range"));
    group.add(new ClassicGroupMember(memberId1, Optional.empty(), "client-id", "client-host", 10000, 5000, "consumer", protocols1, assignments.get(memberId1)));
    group.add(new ClassicGroupMember(memberId2, Optional.empty(), "client-id", "client-host", 10000, 5000, "consumer", protocols2, assignments.get(memberId2)));
    group.transitionTo(PREPARING_REBALANCE);
    group.transitionTo(COMPLETING_REBALANCE);
    group.transitionTo(STABLE);
    context.replay(CoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion()));
    context.commit();
    group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
    // Both members rejoin, driving the group into COMPLETING_REBALANCE, and
    // member 2 leaves a SyncGroup request pending.
    context.sendClassicGroupJoin(new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId("group-id").withMemberId(memberId1).withProtocols(protocols1).withSessionTimeoutMs(5000).withRebalanceTimeoutMs(10000).build());
    context.sendClassicGroupJoin(new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId("group-id").withMemberId(memberId2).withProtocols(protocols2).withSessionTimeoutMs(5000).withRebalanceTimeoutMs(10000).build());
    assertTrue(group.isInState(COMPLETING_REBALANCE));
    GroupMetadataManagerTestContext.SyncResult syncResult = context.sendClassicGroupSync(new GroupMetadataManagerTestContext.SyncGroupRequestBuilder().withGroupId("group-id").withMemberId(memberId2).withGenerationId(1).build());
    // A brand-new member heartbeats with the consumer protocol, triggering
    // the classic -> consumer group upgrade.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeatResult = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setRebalanceTimeoutMs(5000).setServerAssignor("range").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setTopicPartitions(Collections.emptyList()));
    ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1).setMemberEpoch(1).setPreviousMemberEpoch(1).setClientId("client-id").setClientHost("client-host").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(10000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(protocols1))).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1))).build();
    ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(memberId2).setMemberEpoch(1).setPreviousMemberEpoch(1).setClientId("client-id").setClientHost("client-host").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(10000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(protocols2))).setAssignedPartitions(mkAssignment(mkTopicAssignment(barTopicId, 0))).build();
    ConsumerGroupMember expectedMember3 = new ConsumerGroupMember.Builder(memberId3).setMemberEpoch(2).setPreviousMemberEpoch(0).setState(MemberState.UNRELEASED_PARTITIONS).setClientId("client").setClientHost("localhost/127.0.0.1").setServerAssignorName("range").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(5000).setAssignedPartitions(Collections.emptyMap()).build();
    // Expected subscription metadata derived from the metadata image
    // (2 foo partitions, 1 bar partition, with rack placements).
    Map<Integer, Set<String>> fooPartitionRacks = new HashMap<>();
    fooPartitionRacks.put(0, new HashSet<>(Arrays.asList("rack0", "rack1")));
    fooPartitionRacks.put(1, new HashSet<>(Arrays.asList("rack1", "rack2")));
    Map<Integer, Set<String>> barPartitionRacks = new HashMap<>();
    barPartitionRacks.put(0, new HashSet<>(Arrays.asList("rack0", "rack1")));
    Map<String, TopicMetadata> subscriptionMetadata = new HashMap<>();
    subscriptionMetadata.put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, fooPartitionRacks));
    subscriptionMetadata.put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, barPartitionRacks));
    // Expected record sequence: tombstone the classic group, convert the two
    // existing members at epoch 1, then admit member 3 at epoch 2.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember2), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 1), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, expectedMember1.assignedPartitions()), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, expectedMember2.assignedPartitions()), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 1), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember2), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember3), CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 2), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, assignor.targetPartitions(memberId1)), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId3, assignor.targetPartitions(memberId3)), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 2), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember3));
    assertRecordsEquals(expectedRecords, consumerGroupHeartbeatResult.records());
    // The pending classic SyncGroup must be failed with REBALANCE_IN_PROGRESS.
    assertTrue(syncResult.syncFuture.isDone());
    assertEquals(Errors.REBALANCE_IN_PROGRESS.code(), syncResult.syncFuture.get().errorCode());
    context.assertSessionTimeout(groupId, memberId1, 45000);
    context.assertSessionTimeout(groupId, memberId2, 45000);
    context.assertSessionTimeout(groupId, memberId3, 45000);
    // Rolling back the uncommitted upgrade restores the original classic group.
    context.rollback();
    assertEquals(group, context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false));
}
222430.1545149wildfly
/**
 * Parses a cache-container element into an :add operation (registered under the
 * container's address) plus operations for its required and declared children.
 *
 * NOTE(review): several switch cases below rely on DELIBERATE fall-through.
 * When a case's schema-version guard is not met and the case has no break, the
 * attribute/element intentionally falls through to the following case(s) and
 * ultimately to the default, which throws unexpectedAttribute/unexpectedElement.
 * Do not "fix" the missing breaks.
 *
 * @param reader           the XML stream positioned at the container element
 * @param subsystemAddress the Infinispan subsystem address to append to
 * @param operations       accumulator mapping addresses to add operations
 * @throws XMLStreamException on malformed XML or schema-invalid content
 */
private void parseContainer(XMLExtendedStreamReader reader, PathAddress subsystemAddress, Map<PathAddress, ModelNode> operations) throws XMLStreamException {
    String name = require(reader, XMLAttribute.NAME);
    PathAddress address = subsystemAddress.append(CacheContainerResourceDefinition.pathElement(name));
    ModelNode operation = Util.createAddOperation(address);
    operations.put(address, operation);
    for (int i = 0; i < reader.getAttributeCount(); i++) {
        ParseUtils.requireNoNamespaceAttribute(reader, i);
        XMLAttribute attribute = XMLAttribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case NAME:
                {
                    // Already consumed by require(...) above.
                    break;
                }
            case DEFAULT_CACHE:
                {
                    readAttribute(reader, i, operation, CacheContainerResourceDefinition.Attribute.DEFAULT_CACHE);
                    break;
                }
            case JNDI_NAME:
                {
                    // Removed in schema 5.0; ignored (with a warning) before that.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_5_0)) {
                        throw ParseUtils.unexpectedAttribute(reader, i);
                    }
                    ClusteringLogger.ROOT_LOGGER.attributeIgnored(attribute.getLocalName(), reader.getLocalName());
                    break;
                }
            case LISTENER_EXECUTOR:
            case EVICTION_EXECUTOR:
            case REPLICATION_QUEUE_EXECUTOR:
                {
                    // Legacy executor attributes: removed in 4.0, ignored before.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_4_0)) {
                        throw ParseUtils.unexpectedAttribute(reader, i);
                    }
                    ClusteringLogger.ROOT_LOGGER.attributeIgnored(attribute.getLocalName(), reader.getLocalName());
                    break;
                }
            case START:
                {
                    // Removed in schema 3.0; ignored before that.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_3_0)) {
                        throw ParseUtils.unexpectedAttribute(reader, i);
                    }
                    ClusteringLogger.ROOT_LOGGER.attributeIgnored(attribute.getLocalName(), reader.getLocalName());
                    break;
                }
            case ALIASES:
                {
                    readAttribute(reader, i, operation, CacheContainerResourceDefinition.ListAttribute.ALIASES);
                    break;
                }
            case MODULE:
                {
                    // Singular "module" was superseded by "modules" in 12.0;
                    // before 12.0 it maps onto the MODULES list attribute.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_12_0)) {
                        throw ParseUtils.unexpectedAttribute(reader, i);
                    }
                    readAttribute(reader, i, operation, CacheContainerResourceDefinition.ListAttribute.MODULES);
                    break;
                }
            case STATISTICS_ENABLED:
                {
                    readAttribute(reader, i, operation, CacheContainerResourceDefinition.Attribute.STATISTICS_ENABLED);
                    break;
                }
            case MODULES:
                {
                    // Only valid since 12.0. Otherwise: intentional fall-through
                    // to MARSHALLER (whose guard also fails pre-13.0) and then
                    // to default -> unexpectedAttribute.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_12_0)) {
                        readAttribute(reader, i, operation, CacheContainerResourceDefinition.ListAttribute.MODULES);
                        break;
                    }
                }
            case MARSHALLER:
                {
                    // Only valid since 13.0; otherwise intentional fall-through
                    // to default -> unexpectedAttribute.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_13_0)) {
                        readAttribute(reader, i, operation, CacheContainerResourceDefinition.Attribute.MARSHALLER);
                        break;
                    }
                }
            default:
                {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Pre-14.0 configurations default to the LEGACY marshaller for
    // compatibility when none was declared explicitly.
    if (!operation.hasDefined(CacheContainerResourceDefinition.Attribute.MARSHALLER.getName())) {
        if (!this.schema.since(InfinispanSubsystemSchema.VERSION_14_0)) {
            operation.get(CacheContainerResourceDefinition.Attribute.MARSHALLER.getName()).set(new ModelNode(InfinispanMarshallerFactory.LEGACY.name()));
        }
    }
    addRequiredChildOperations(address, CacheContainerResourceDefinition.REQUIRED_CHILDREN, operations);
    addRequiredSingletonChildOperations(address, CacheContainerResourceDefinition.REQUIRED_SINGLETON_CHILDREN, operations);
    // Child elements: the same deliberate fall-through pattern applies to the
    // version-guarded cases below.
    while (reader.hasNext() && (reader.nextTag() != XMLStreamConstants.END_ELEMENT)) {
        XMLElement element = XMLElement.forName(reader.getLocalName());
        switch(element) {
            case TRANSPORT:
                {
                    this.parseTransport(reader, address, operations);
                    break;
                }
            case LOCAL_CACHE:
                {
                    this.parseLocalCache(reader, address, operations);
                    break;
                }
            case INVALIDATION_CACHE:
                {
                    this.parseInvalidationCache(reader, address, operations);
                    break;
                }
            case REPLICATED_CACHE:
                {
                    this.parseReplicatedCache(reader, address, operations);
                    break;
                }
            case DISTRIBUTED_CACHE:
                {
                    this.parseDistributedCache(reader, address, operations);
                    break;
                }
            case EXPIRATION_THREAD_POOL:
                {
                    // Since 4.0 only; pre-4.0 falls through toward default.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_4_0)) {
                        this.parseScheduledThreadPool(ScheduledThreadPoolResourceDefinition.EXPIRATION, reader, address, operations);
                        break;
                    }
                }
            case LISTENER_THREAD_POOL:
                {
                    // Since 4.0 only; pre-4.0 falls through toward default.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_4_0)) {
                        this.parseThreadPool(ThreadPoolResourceDefinition.LISTENER, reader, address, operations);
                        break;
                    }
                }
            case ASYNC_OPERATIONS_THREAD_POOL:
            case PERSISTENCE_THREAD_POOL:
            case REMOTE_COMMAND_THREAD_POOL:
            case STATE_TRANSFER_THREAD_POOL:
            case TRANSPORT_THREAD_POOL:
                {
                    // Removed in 14.0; ignored (content skipped) before that.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_14_0)) {
                        throw ParseUtils.unexpectedElement(reader);
                    }
                    ClusteringLogger.ROOT_LOGGER.elementIgnored(element.getLocalName());
                    ParseUtils.requireNoContent(reader);
                    break;
                }
            case SCATTERED_CACHE:
                {
                    // Since 6.0 only; otherwise falls through toward default.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_6_0)) {
                        this.parseScatteredCache(reader, address, operations);
                        break;
                    }
                }
            case BLOCKING_THREAD_POOL:
                {
                    // Since 11.0 only; otherwise falls through toward default.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_11_0)) {
                        this.parseThreadPool(ThreadPoolResourceDefinition.BLOCKING, reader, address, operations);
                        break;
                    }
                }
            case NON_BLOCKING_THREAD_POOL:
                {
                    // Since 11.0 only; otherwise falls through to default.
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_11_0)) {
                        this.parseThreadPool(ThreadPoolResourceDefinition.NON_BLOCKING, reader, address, operations);
                        break;
                    }
                }
            default:
                {
                    throw ParseUtils.unexpectedElement(reader);
                }
        }
    }
}
223013.334168wildfly
/**
 * Parses an {@code <xa-datasource>} element from the 1.2 datasources schema and appends the
 * resulting management operations to {@code list}.
 *
 * <p>Emits, in order: the xa-datasource ADD operation, one ADD per nested
 * {@code xa-datasource-property}, and finally (when the {@code enabled} attribute resolves to
 * true) an ENABLE operation against the same address.
 *
 * @param reader        StAX reader positioned on the {@code xa-datasource} start element
 * @param list          target list for the generated operations
 * @param parentAddress address of the parent datasources subsystem resource
 * @throws XMLStreamException on underlying XML errors
 * @throws ParserException    on unexpected elements/end tags or premature end of document
 * @throws ValidateException  propagated from nested validation-aware parsing helpers
 */
private void parseXADataSource_1_2(XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // Whether to append an ENABLE op after the ADD, and whether that enablement is persistent.
    boolean enabled = Defaults.ENABLED.booleanValue();
    boolean persistEnabled = true;
    // --- Pass 1: attributes of the <xa-datasource> element itself ---
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final XaDataSource.Attribute attribute = XaDataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        enabled = Boolean.parseBoolean(value);
                        // NOTE(review): dead store — persistEnabled is already initialized to true
                        // above and never set to false anywhere. Possibly an explicit XML value was
                        // meant to flip this to false; confirm against the schema's intent.
                        persistEnabled = true;
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // pool-name becomes the resource address's last element, not an attribute.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                // statistics-enabled is not part of the XaDataSource.Attribute enum in this
                // schema version, so it is handled by raw local-name comparison here.
                if (Constants.STATISTICS_ENABLED.getName().equals(reader.getAttributeLocalName(i))) {
                    final String value = rawAttributeText(reader, Constants.STATISTICS_ENABLED.getXmlName());
                    if (value != null) {
                        Constants.STATISTICS_ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                } else {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // Resource address: <parent>/xa-data-source=<poolName>. Protected so it cannot be mutated
    // after being shared between the ADD, property ADDs, and the ENABLE operation.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(XA_DATASOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // Property ADD ops are buffered so they can be appended after the datasource ADD itself.
    List<ModelNode> xadatasourcePropertiesOperations = new ArrayList<ModelNode>(0);
    // --- Pass 2: child elements until the matching </xa-datasource> end tag ---
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.XA_DATASOURCE) {
                        // Closing tag of this datasource: flush everything in order and finish.
                        list.add(operation);
                        list.addAll(xadatasourcePropertiesOperations);
                        if (enabled) {
                            final ModelNode enableOperation = new ModelNode();
                            enableOperation.get(OP).set(ENABLE);
                            enableOperation.get(OP_ADDR).set(dsAddress);
                            enableOperation.get(PERSISTENT).set(persistEnabled);
                            list.add(enableOperation);
                        }
                        return;
                    } else {
                        // End tags of recognized children are consumed silently; anything
                        // unrecognized is a parse error.
                        if (XaDataSource.Tag.forName(reader.getLocalName()) == XaDataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(XaDataSource.Tag.forName(reader.getLocalName())) {
                        case XA_DATASOURCE_PROPERTY:
                            {
                                // Each property becomes its own child resource ADD:
                                // <dsAddress>/xa-datasource-properties=<name> with a value attr.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(XADATASOURCE_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                XADATASOURCE_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                xadatasourcePropertiesOperations.add(configOperation);
                                break;
                            }
                        case XA_DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                XA_DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case XA_POOL:
                            {
                                parseXaPool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                parseDsSecurity(reader, operation);
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        case RECOVERY:
                            {
                                parseRecovery(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Reader exhausted before the closing </xa-datasource> tag was seen.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
224143.9736141wildfly
/**
 * Reads the management resource (and its description) for a deployed EJB component and
 * asserts the expected attribute set for its bean type.
 *
 * <p>Per-type expectations exercised below:
 * <ul>
 *   <li>stateless/MDB: pool attributes present; stateful/singleton: pool attributes absent</li>
 *   <li>stateless/singleton/MDB: timer attribute validated, container-managed transactions</li>
 *   <li>stateful (the remaining branch): bean-managed transactions, lifecycle-callback and
 *       remove-method attributes checked per concrete bean class</li>
 *   <li>singleton: concurrency-management, init-on-startup and depends-on checked</li>
 *   <li>MDB: messaging type, destination and activation-config checked; all other types:
 *       async methods, business interfaces and JNDI names checked</li>
 * </ul>
 *
 * @param type        bean type key (e.g. stateless, stateful, singleton, message-driven)
 * @param name        component name within the deployment
 * @param expectTimer whether the component is expected to declare a timer
 * @throws Exception on management operation failure
 */
protected void testComponent(String type, String name, boolean expectTimer) throws Exception {
    ModelNode address = getComponentAddress(type, name).toModelNode();
    address.protect();
    // Fetch both the resource description (metadata) and the live resource (values).
    ModelNode resourceDescription = executeOperation(managementClient, ModelDescriptionConstants.READ_RESOURCE_DESCRIPTION_OPERATION, address);
    ModelNode resource = executeOperation(managementClient, ModelDescriptionConstants.READ_RESOURCE_OPERATION, address);
    assertTrue(resourceDescription.get(ATTRIBUTES, COMPONENT_CLASS_NAME).isDefined());
    assertEquals(ModelType.STRING, resourceDescription.get(ATTRIBUTES, COMPONENT_CLASS_NAME, DESCRIPTION).getType());
    assertEquals(ModelType.STRING, resourceDescription.get(ATTRIBUTES, COMPONENT_CLASS_NAME, TYPE).asType());
    final ModelNode componentClassNameNode = resource.get(COMPONENT_CLASS_NAME);
    assertTrue(componentClassNameNode.isDefined());
    // Used below to branch on which concrete test bean backs this component.
    final String componentClassName = componentClassNameNode.asString();
    validateSecurity(address, resourceDescription, resource);
    if (!STATEFUL.equals(type) && !SINGLETON.equals(type)) {
        // Stateless and MDB components are pooled.
        validatePool(address, resourceDescription, resource);
    } else {
        // Stateful and singleton components must not expose pool attributes at all.
        for (String attr : POOL_ATTRIBUTES) {
            assertFalse(resourceDescription.get(ModelDescriptionConstants.ATTRIBUTES).has(attr));
            assertFalse(resource.has(attr));
        }
    }
    if (STATELESS.equals(type) || SINGLETON.equals(type) || MESSAGE_DRIVEN.equals(type)) {
        validateTimer(address, resourceDescription, resource, expectTimer);
        assertEquals(TransactionManagementType.CONTAINER.name(), resource.get(TRANSACTION_TYPE).asString());
    } else {
        // Remaining branch: stateful beans — no timer attribute, bean-managed transactions.
        assertFalse(resourceDescription.get(ModelDescriptionConstants.ATTRIBUTES).has(TIMER_ATTRIBUTE));
        assertFalse(resource.has(TIMER_ATTRIBUTE));
        assertEquals(TransactionManagementType.BEAN.name(), resource.get(TRANSACTION_TYPE).asString());
        if (componentClassName.equals("org.jboss.as.test.integration.ejb.management.deployments.ManagedStatefulBean")) {
            // Plain stateful bean: passivation-capable, no synchronization callbacks configured.
            assertTrue(resource.get(PASSIVATION_CAPABLE).asBoolean());
            assertFalse(resource.get(AFTER_BEGIN_METHOD).isDefined());
            assertFalse(resource.get(BEFORE_COMPLETION_METHOD).isDefined());
            assertFalse(resource.get(AFTER_COMPLETION_METHOD).isDefined());
        } else {
            // Fully-configured stateful bean: timeout, synchronization callbacks and
            // remove methods are all expected to be reported.
            assertFalse(resource.get(PASSIVATION_CAPABLE).asBoolean());
            assertEquals("2 HOURS", resource.get(STATEFUL_TIMEOUT).asString());
            assertEquals("private void afterBegin()", resource.get(AFTER_BEGIN_METHOD).asString());
            assertEquals("private void beforeCompletion()", resource.get(BEFORE_COMPLETION_METHOD).asString());
            assertEquals("private void afterCompletion()", resource.get(AFTER_COMPLETION_METHOD).asString());
            final ModelNode removeMethodsNode = resource.get(REMOVE_METHODS);
            final List<ModelNode> removeMethodsList = removeMethodsNode.asList();
            // NOTE(review): 1 or 3 remove methods are both accepted — presumably depends on
            // deployment descriptor variant; confirm against the test deployments.
            assertTrue(removeMethodsList.size() == 1 || removeMethodsList.size() == 3);
            for (ModelNode m : removeMethodsList) {
                final String beanMethod = m.get(BEAN_METHOD).asString();
                final boolean retainIfException = m.get(RETAIN_IF_EXCEPTION).asBoolean();
                if (beanMethod.contains("void removeTrue()")) {
                    assertTrue(retainIfException);
                } else if (beanMethod.contains("void removeFalse()") || beanMethod.contains("void remove()")) {
                    assertFalse(retainIfException);
                } else {
                    fail("Unknown stateful bean remove method: " + beanMethod);
                }
            }
        }
    }
    if (SINGLETON.equals(type)) {
        final ModelNode concurrencyTypeNode = resource.get(CONCURRENCY_MANAGEMENT_TYPE);
        final ModelNode initOnStartUpNode = resource.get(INIT_ON_STARTUP);
        final ModelNode dependsOnNode = resource.get(DEPENDS_ON);
        if (componentClassName.equals("org.jboss.as.test.integration.ejb.management.deployments.ManagedSingletonBean")) {
            // Default singleton: nothing configured.
            assertFalse(initOnStartUpNode.asBoolean());
            assertFalse(dependsOnNode.isDefined());
            assertFalse(concurrencyTypeNode.isDefined());
        } else {
            // Configured singleton: startup, bean-managed concurrency, one dependency.
            assertTrue(initOnStartUpNode.asBoolean());
            assertEquals(ConcurrencyManagementType.BEAN.name(), concurrencyTypeNode.asString());
            final List<ModelNode> dependsOnList = dependsOnNode.asList();
            assertEquals(1, dependsOnList.size());
            for (ModelNode d : dependsOnList) {
                if (!d.asString().equals("ManagedSingletonBean")) {
                    fail("Unknown value of depends-on: " + d.asString());
                }
            }
        }
    }
    if (MESSAGE_DRIVEN.equals(type)) {
        assertEquals("jakarta.jms.MessageListener", resource.get(MESSAGING_TYPE).asString());
        if (componentClassName.equals("org.jboss.as.test.integration.ejb.management.deployments.NoTimerMDB")) {
            assertEquals("jakarta.jms.Queue", resource.get(MESSAGE_DESTINATION_TYPE).asString());
            assertEquals("queue/NoTimerMDB-queue", resource.get(MESSAGE_DESTINATION_LINK).asString());
        }
        // Activation config must contain at least destinationType/destination; only a fixed
        // set of property names is tolerated.
        final ModelNode activationConfigNode = resource.get(ACTIVATION_CONFIG);
        assertTrue(activationConfigNode.isDefined());
        final List<Property> activationConfigProps = activationConfigNode.asPropertyList();
        assertTrue(activationConfigProps.size() >= 2);
        for (Property p : activationConfigProps) {
            final String pName = p.getName();
            final String pValue = p.getValue().asString();
            switch(pName) {
                case "destinationType":
                    assertEquals("jakarta.jms.Queue", pValue);
                    break;
                case "destination":
                    assertTrue(pValue.startsWith("java:/queue/"));
                    break;
                case "acknowledgeMode":
                    assertEquals("Auto-acknowledge", pValue);
                    break;
                default:
                    fail("Unknown activation config property: " + pName);
                    break;
            }
        }
    } else {
        // Session beans: async methods (when a timer bean), business interfaces, JNDI names.
        if (expectTimer) {
            final ModelNode asyncMethodsNode = resource.get(ASYNC_METHODS);
            final List<ModelNode> asyncMethodsList = asyncMethodsNode.asList();
            assertEquals(1, asyncMethodsList.size());
            for (ModelNode m : asyncMethodsList) {
                if (!m.asString().contains("void async(int, int)")) {
                    fail("Unknown async methods: " + m.asString());
                }
            }
        }
        final ModelNode businessRemoteNode = resource.get(BUSINESS_REMOTE);
        final List<ModelNode> businessRemoteList = businessRemoteNode.asList();
        assertEquals(1, businessRemoteList.size());
        for (ModelNode r : businessRemoteList) {
            if (!r.asString().equals("org.jboss.as.test.integration.ejb.management.deployments.BusinessInterface")) {
                fail("Unknown business remote interface: " + r.asString());
            }
        }
        // The bean class itself serves as the (no-interface) local view.
        final ModelNode businessLocalNode = resource.get(BUSINESS_LOCAL);
        final List<ModelNode> businessLocalList = businessLocalNode.asList();
        assertEquals(1, businessLocalList.size());
        for (ModelNode l : businessLocalList) {
            if (!l.asString().equals(componentClassName)) {
                fail("Unknown business local interface: " + l.asString());
            }
        }
        final ModelNode jndiNamesNode = resource.get(JNDI_NAMES);
        final List<ModelNode> jndiNamesList = jndiNamesNode.asList();
        assertTrue(jndiNamesList.size() >= 6);
        for (ModelNode j : jndiNamesList) {
            final String n = j.asString();
            if (!(n.startsWith("java:global/") || n.startsWith("java:app/") || n.startsWith("java:module/") || n.startsWith("ejb:/") || n.startsWith("java:jboss/"))) {
                fail("Unknown jndi name for " + name + ": " + n);
            }
        }
    }
}
237453.021173cassandra
/**
 * Exercises CQL arithmetic operators (+, -, *, /, %, unary -) across every pair of numeric
 * column types (tinyint..decimal), asserting both the generated result column names and the
 * type-promotion rules of each operation, then verifies that arithmetic with a null operand
 * yields null.
 *
 * @throws Throwable on any CQL execution or assertion failure
 */
public void testSingleOperations() throws Throwable {
    createTable("CREATE TABLE %s (a tinyint, b smallint, c int, d bigint, e float, f double, g varint, h decimal, PRIMARY KEY(a, b, c))");
    // Fixed: was "%S" — an uppercase-string conversion in String.format that upper-cased the
    // substituted keyspace.table name, inconsistent with every other query in this method.
    execute("INSERT INTO %s (a, b, c, d, e, f, g, h) VALUES (1, 2, 3, 4, 5.5, 6.5, 7, 8.5)");
    // Addition: result column names, then result types/values for each left operand.
    assertColumnNames(execute("SELECT a + a, b + a, c + a, d + a, e + a, f + a, g + a, h + a FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), "a + a", "b + a", "c + a", "d + a", "e + a", "f + a", "g + a", "h + a");
    assertRows(execute("SELECT a + a, b + a, c + a, d + a, e + a, f + a, g + a, h + a FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row((byte) 2, (short) 3, 4, 5L, 6.5F, 7.5, BigInteger.valueOf(8), BigDecimal.valueOf(9.5)));
    assertRows(execute("SELECT a + b, b + b, c + b, d + b, e + b, f + b, g + b, h + b FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row((short) 3, (short) 4, 5, 6L, 7.5F, 8.5, BigInteger.valueOf(9), BigDecimal.valueOf(10.5)));
    assertRows(execute("SELECT a + c, b + c, c + c, d + c, e + c, f + c, g + c, h + c FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row(4, 5, 6, 7L, 8.5F, 9.5, BigInteger.valueOf(10), BigDecimal.valueOf(11.5)));
    assertRows(execute("SELECT a + d, b + d, c + d, d + d, e + d, f + d, g + d, h + d FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row(5L, 6L, 7L, 8L, 9.5, 10.5, BigInteger.valueOf(11), BigDecimal.valueOf(12.5)));
    assertRows(execute("SELECT a + e, b + e, c + e, d + e, e + e, f + e, g + e, h + e FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row(6.5F, 7.5F, 8.5F, 9.5, 11.0F, 12.0, BigDecimal.valueOf(12.5), BigDecimal.valueOf(14.0)));
    assertRows(execute("SELECT a + f, b + f, c + f, d + f, e + f, f + f, g + f, h + f FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row(7.5, 8.5, 9.5, 10.5, 12.0, 13.0, BigDecimal.valueOf(13.5), BigDecimal.valueOf(15.0)));
    assertRows(execute("SELECT a + g, b + g, c + g, d + g, e + g, f + g, g + g, h + g FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row(BigInteger.valueOf(8), BigInteger.valueOf(9), BigInteger.valueOf(10), BigInteger.valueOf(11), BigDecimal.valueOf(12.5), BigDecimal.valueOf(13.5), BigInteger.valueOf(14), BigDecimal.valueOf(15.5)));
    assertRows(execute("SELECT a + h, b + h, c + h, d + h, e + h, f + h, g + h, h + h FROM %s WHERE a = 1 AND b = 2 AND c = 1 + 2"), row(BigDecimal.valueOf(9.5), BigDecimal.valueOf(10.5), BigDecimal.valueOf(11.5), BigDecimal.valueOf(12.5), BigDecimal.valueOf(14.0), BigDecimal.valueOf(15.0), BigDecimal.valueOf(15.5), BigDecimal.valueOf(17.0)));
    // Subtraction.
    assertColumnNames(execute("SELECT a - a, b - a, c - a, d - a, e - a, f - a, g - a, h - a FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), "a - a", "b - a", "c - a", "d - a", "e - a", "f - a", "g - a", "h - a");
    assertRows(execute("SELECT a - a, b - a, c - a, d - a, e - a, f - a, g - a, h - a FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row((byte) 0, (short) 1, 2, 3L, 4.5F, 5.5, BigInteger.valueOf(6), BigDecimal.valueOf(7.5)));
    assertRows(execute("SELECT a - b, b - b, c - b, d - b, e - b, f - b, g - b, h - b FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row((short) -1, (short) 0, 1, 2L, 3.5F, 4.5, BigInteger.valueOf(5), BigDecimal.valueOf(6.5)));
    assertRows(execute("SELECT a - c, b - c, c - c, d - c, e - c, f - c, g - c, h - c FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row(-2, -1, 0, 1L, 2.5F, 3.5, BigInteger.valueOf(4), BigDecimal.valueOf(5.5)));
    assertRows(execute("SELECT a - d, b - d, c - d, d - d, e - d, f - d, g - d, h - d FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row(-3L, -2L, -1L, 0L, 1.5, 2.5, BigInteger.valueOf(3), BigDecimal.valueOf(4.5)));
    assertRows(execute("SELECT a - e, b - e, c - e, d - e, e - e, f - e, g - e, h - e FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row(-4.5F, -3.5F, -2.5F, -1.5, 0.0F, 1.0, BigDecimal.valueOf(1.5), BigDecimal.valueOf(3.0)));
    assertRows(execute("SELECT a - f, b - f, c - f, d - f, e - f, f - f, g - f, h - f FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row(-5.5, -4.5, -3.5, -2.5, -1.0, 0.0, BigDecimal.valueOf(0.5), BigDecimal.valueOf(2.0)));
    assertRows(execute("SELECT a - g, b - g, c - g, d - g, e - g, f - g, g - g, h - g FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row(BigInteger.valueOf(-6), BigInteger.valueOf(-5), BigInteger.valueOf(-4), BigInteger.valueOf(-3), BigDecimal.valueOf(-1.5), BigDecimal.valueOf(-0.5), BigInteger.valueOf(0), BigDecimal.valueOf(1.5)));
    assertRows(execute("SELECT a - h, b - h, c - h, d - h, e - h, f - h, g - h, h - h FROM %s WHERE a = 1 AND b = 2 AND c = 4 - 1"), row(BigDecimal.valueOf(-7.5), BigDecimal.valueOf(-6.5), BigDecimal.valueOf(-5.5), BigDecimal.valueOf(-4.5), BigDecimal.valueOf(-3.0), BigDecimal.valueOf(-2.0), BigDecimal.valueOf(-1.5), BigDecimal.valueOf(0.0)));
    // Multiplication — note expected BigDecimal scale differs from BigDecimal.valueOf in
    // some cells (e.g. "8.50"), hence the String constructor.
    assertColumnNames(execute("SELECT a * a, b * a, c * a, d * a, e * a, f * a, g * a, h * a FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), "a * a", "b * a", "c * a", "d * a", "e * a", "f * a", "g * a", "h * a");
    assertRows(execute("SELECT a * a, b * a, c * a, d * a, e * a, f * a, g * a, h * a FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row((byte) 1, (short) 2, 3, 4L, 5.5F, 6.5, BigInteger.valueOf(7), new BigDecimal("8.50")));
    assertRows(execute("SELECT a * b, b * b, c * b, d * b, e * b, f * b, g * b, h * b FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row((short) 2, (short) 4, 6, 8L, 11.0F, 13.0, BigInteger.valueOf(14), new BigDecimal("17.00")));
    assertRows(execute("SELECT a * c, b * c, c * c, d * c, e * c, f * c, g * c, h * c FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row(3, 6, 9, 12L, 16.5F, 19.5, BigInteger.valueOf(21), new BigDecimal("25.50")));
    assertRows(execute("SELECT a * d, b * d, c * d, d * d, e * d, f * d, g * d, h * d FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row(4L, 8L, 12L, 16L, 22.0, 26.0, BigInteger.valueOf(28), new BigDecimal("34.00")));
    assertRows(execute("SELECT a * e, b * e, c * e, d * e, e * e, f * e, g * e, h * e FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row(5.5F, 11.0F, 16.5F, 22.0, 30.25F, 35.75, new BigDecimal("38.5"), new BigDecimal("46.75")));
    assertRows(execute("SELECT a * f, b * f, c * f, d * f, e * f, f * f, g * f, h * f FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row(6.5, 13.0, 19.5, 26.0, 35.75, 42.25, new BigDecimal("45.5"), BigDecimal.valueOf(55.25)));
    assertRows(execute("SELECT a * g, b * g, c * g, d * g, e * g, f * g, g * g, h * g FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row(BigInteger.valueOf(7), BigInteger.valueOf(14), BigInteger.valueOf(21), BigInteger.valueOf(28), new BigDecimal("38.5"), new BigDecimal("45.5"), BigInteger.valueOf(49), new BigDecimal("59.5")));
    assertRows(execute("SELECT a * h, b * h, c * h, d * h, e * h, f * h, g * h, h * h FROM %s WHERE a = 1 AND b = 2 AND c = 3 * 1"), row(new BigDecimal("8.50"), new BigDecimal("17.00"), new BigDecimal("25.50"), new BigDecimal("34.00"), new BigDecimal("46.75"), new BigDecimal("55.25"), new BigDecimal("59.5"), new BigDecimal("72.25")));
    // Division — integer types truncate; decimal results carry extended precision.
    assertColumnNames(execute("SELECT a / a, b / a, c / a, d / a, e / a, f / a, g / a, h / a FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), "a / a", "b / a", "c / a", "d / a", "e / a", "f / a", "g / a", "h / a");
    assertRows(execute("SELECT a / a, b / a, c / a, d / a, e / a, f / a, g / a, h / a FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row((byte) 1, (short) 2, 3, 4L, 5.5F, 6.5, BigInteger.valueOf(7), new BigDecimal("8.5")));
    assertRows(execute("SELECT a / b, b / b, c / b, d / b, e / b, f / b, g / b, h / b FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row((short) 0, (short) 1, 1, 2L, 2.75F, 3.25, BigInteger.valueOf(3), new BigDecimal("4.25")));
    assertRows(execute("SELECT a / c, b / c, c / c, d / c, e / c, f / c, g / c, h / c FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row(0, 0, 1, 1L, 1.8333334F, 2.1666666666666665, BigInteger.valueOf(2), new BigDecimal("2.83333333333333333333333333333333")));
    assertRows(execute("SELECT a / d, b / d, c / d, d / d, e / d, f / d, g / d, h / d FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row(0L, 0L, 0L, 1L, 1.375, 1.625, BigInteger.valueOf(1), new BigDecimal("2.125")));
    assertRows(execute("SELECT a / e, b / e, c / e, d / e, e / e, f / e, g / e, h / e FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row(0.18181819F, 0.36363637F, 0.54545456F, 0.7272727272727273, 1.0F, 1.1818181818181819, new BigDecimal("1.27272727272727272727272727272727"), new BigDecimal("1.54545454545454545454545454545455")));
    assertRows(execute("SELECT a / f, b / f, c / f, d / f, e / f, f / f, g / f, h / f FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row(0.15384615384615385, 0.3076923076923077, 0.46153846153846156, 0.6153846153846154, 0.8461538461538461, 1.0, new BigDecimal("1.07692307692307692307692307692308"), new BigDecimal("1.30769230769230769230769230769231")));
    assertRows(execute("SELECT a / g, b / g, c / g, d / g, e / g, f / g, g / g, h / g FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row(BigInteger.valueOf(0), BigInteger.valueOf(0), BigInteger.valueOf(0), BigInteger.valueOf(0), new BigDecimal("0.78571428571428571428571428571429"), new BigDecimal("0.92857142857142857142857142857143"), BigInteger.valueOf(1), new BigDecimal("1.21428571428571428571428571428571")));
    assertRows(execute("SELECT a / h, b / h, c / h, d / h, e / h, f / h, g / h, h / h FROM %s WHERE a = 1 AND b = 2 AND c = 3 / 1"), row(new BigDecimal("0.11764705882352941176470588235294"), new BigDecimal("0.23529411764705882352941176470588"), new BigDecimal("0.35294117647058823529411764705882"), new BigDecimal("0.47058823529411764705882352941176"), new BigDecimal("0.64705882352941176470588235294118"), new BigDecimal("0.76470588235294117647058823529412"), new BigDecimal("0.82352941176470588235294117647059"), new BigDecimal("1")));
    // Modulo — "%%" escapes the literal % through the framework's String.format pass.
    assertColumnNames(execute("SELECT a %% a, b %% a, c %% a, d %% a, e %% a, f %% a, g %% a, h %% a FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), "a % a", "b % a", "c % a", "d % a", "e % a", "f % a", "g % a", "h % a");
    assertRows(execute("SELECT a %% a, b %% a, c %% a, d %% a, e %% a, f %% a, g %% a, h %% a FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row((byte) 0, (short) 0, 0, 0L, 0.5F, 0.5, BigInteger.valueOf(0), new BigDecimal("0.5")));
    assertRows(execute("SELECT a %% b, b %% b, c %% b, d %% b, e %% b, f %% b, g %% b, h %% b FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row((short) 1, (short) 0, 1, 0L, 1.5F, 0.5, BigInteger.valueOf(1), new BigDecimal("0.5")));
    assertRows(execute("SELECT a %% c, b %% c, c %% c, d %% c, e %% c, f %% c, g %% c, h %% c FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row(1, 2, 0, 1L, 2.5F, 0.5, BigInteger.valueOf(1), new BigDecimal("2.5")));
    assertRows(execute("SELECT a %% d, b %% d, c %% d, d %% d, e %% d, f %% d, g %% d, h %% d FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row(1L, 2L, 3L, 0L, 1.5, 2.5, BigInteger.valueOf(3), new BigDecimal("0.5")));
    assertRows(execute("SELECT a %% e, b %% e, c %% e, d %% e, e %% e, f %% e, g %% e, h %% e FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row(1.0F, 2.0F, 3.0F, 4.0, 0.0F, 1.0, new BigDecimal("1.5"), new BigDecimal("3.0")));
    assertRows(execute("SELECT a %% f, b %% f, c %% f, d %% f, e %% f, f %% f, g %% f, h %% f FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row(1.0, 2.0, 3.0, 4.0, 5.5, 0.0, new BigDecimal("0.5"), new BigDecimal("2.0")));
    assertRows(execute("SELECT a %% g, b %% g, c %% g, d %% g, e %% g, f %% g, g %% g, h %% g FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row(BigInteger.valueOf(1), BigInteger.valueOf(2), BigInteger.valueOf(3), BigInteger.valueOf(4), new BigDecimal("5.5"), new BigDecimal("6.5"), BigInteger.valueOf(0), new BigDecimal("1.5")));
    assertRows(execute("SELECT a %% h, b %% h, c %% h, d %% h, e %% h, f %% h, g %% h, h %% h FROM %s WHERE a = 1 AND b = 2 AND c = 23 %% 5"), row(new BigDecimal("1.0"), new BigDecimal("2.0"), new BigDecimal("3.0"), new BigDecimal("4.0"), new BigDecimal("5.5"), new BigDecimal("6.5"), new BigDecimal("7"), new BigDecimal("0.0")));
    // Unary negation.
    assertColumnNames(execute("SELECT -a, -b, -c, -d, -e, -f, -g, -h FROM %s WHERE a = 1 AND b = 2"), "-a", "-b", "-c", "-d", "-e", "-f", "-g", "-h");
    assertRows(execute("SELECT -a, -b, -c, -d, -e, -f, -g, -h FROM %s WHERE a = 1 AND b = 2"), row((byte) -1, (short) -2, -3, -4L, -5.5F, -6.5, BigInteger.valueOf(-7), new BigDecimal("-8.5")));
    // Null propagation: any operation with a null operand yields null.
    execute("UPDATE %s SET d = ? WHERE a = ? AND b = ? AND c = ?", null, (byte) 1, (short) 2, 3);
    assertRows(execute("SELECT a + d, b + d, c + d, d + d, e + d, f + d, g + d, h + d FROM %s WHERE a = 1 AND b = 2"), row(null, null, null, null, null, null, null, null));
}
236786.661177cassandra
/**
 * Exercises the interaction of batched conditional (LWT) statements, TTLs, and a
 * static column on a table with a three-level clustering key.
 *
 * Each numbered batch below mixes unconditional inserts/updates/deletes with one
 * conditional statement; the assertions after each batch verify that
 * (a) the condition gates the entire batch (an unmet condition leaves the
 *     partition unchanged, a met condition applies every statement), and
 * (b) the static column {@code sval} is shared by all rows of the partition.
 * TTL steps write with a 5-second TTL and sleep 6 seconds so the follow-up
 * assertion can observe the expired regular column (shown as {@code null}).
 */
public void testBatchStaticTTLConditionalInteraction() throws Throwable {
    // Partition key `id`, clustering (clustering1, clustering2, clustering3),
    // one static column `sval`, one regular column `val`.
    createTable(String.format("CREATE TABLE %s.clustering_static (\n" + "  id int,\n" + "  clustering1 int,\n" + "  clustering2 int,\n" + "  clustering3 int,\n" + "  sval int static, \n" + "  val int, \n" + " PRIMARY KEY(id, clustering1, clustering2, clustering3)" + ")", KEYSPACE));
    // Start every run from an empty partition id=1.
    execute("DELETE FROM " + KEYSPACE + ".clustering_static WHERE id=1");
    // Reusable statement templates; placeholders are filled via String.format below.
    String clusteringInsert = "INSERT INTO " + KEYSPACE + ".clustering_static(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s); ";
    String clusteringTTLInsert = "INSERT INTO " + KEYSPACE + ".clustering_static(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s) USING TTL %s; ";
    String clusteringStaticInsert = "INSERT INTO " + KEYSPACE + ".clustering_static(id, clustering1, clustering2, clustering3, sval, val) VALUES(%s, %s, %s, %s, %s, %s); ";
    String clusteringConditionalInsert = "INSERT INTO " + KEYSPACE + ".clustering_static(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s) IF NOT EXISTS; ";
    String clusteringConditionalTTLInsert = "INSERT INTO " + KEYSPACE + ".clustering_static(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s)  IF NOT EXISTS USING TTL %s; ";
    String clusteringUpdate = "UPDATE " + KEYSPACE + ".clustering_static SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s ;";
    String clusteringStaticUpdate = "UPDATE " + KEYSPACE + ".clustering_static SET sval=%s WHERE id=%s ;";
    String clusteringTTLUpdate = "UPDATE " + KEYSPACE + ".clustering_static USING TTL %s SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s ;";
    String clusteringStaticConditionalUpdate = "UPDATE " + KEYSPACE + ".clustering_static SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF sval=%s ;";
    String clusteringConditionalTTLUpdate = "UPDATE " + KEYSPACE + ".clustering_static USING TTL %s SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF val=%s ;";
    String clusteringStaticConditionalTTLUpdate = "UPDATE " + KEYSPACE + ".clustering_static USING TTL %s SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF sval=%s ;";
    String clusteringStaticConditionalStaticUpdate = "UPDATE " + KEYSPACE + ".clustering_static SET sval=%s WHERE id=%s IF sval=%s; ";
    String clusteringDelete = "DELETE FROM " + KEYSPACE + ".clustering_static WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s ;";
    String clusteringRangeDelete = "DELETE FROM " + KEYSPACE + ".clustering_static WHERE id=%s AND clustering1=%s ;";
    String clusteringConditionalDelete = "DELETE FROM " + KEYSPACE + ".clustering_static WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF val=%s ; ";
    String clusteringStaticConditionalDelete = "DELETE FROM " + KEYSPACE + ".clustering_static WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF sval=%s ; ";
    // Seed: single row (1,1,1,1) with sval=1, val=1.
    execute("BEGIN BATCH " + String.format(clusteringStaticInsert, 1, 1, 1, 1, 1, 1) + " APPLY BATCH");
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 1, 1, 1));
    // Batch 2: plain insert + conditional update gated on sval=1 (met) -> both apply.
    StringBuilder cmd2 = new StringBuilder();
    cmd2.append("BEGIN BATCH ");
    cmd2.append(String.format(clusteringInsert, 1, 1, 1, 2, 2));
    cmd2.append(String.format(clusteringStaticConditionalUpdate, 11, 1, 1, 1, 1, 1));
    cmd2.append("APPLY BATCH ");
    execute(cmd2.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 1, 1, 11), row(1, 1, 1, 2, 1, 2));
    // Batch 3: insert + static update (sval=22 visible on every row) + row delete.
    StringBuilder cmd3 = new StringBuilder();
    cmd3.append("BEGIN BATCH ");
    cmd3.append(String.format(clusteringInsert, 1, 1, 2, 3, 23));
    cmd3.append(String.format(clusteringStaticUpdate, 22, 1));
    cmd3.append(String.format(clusteringDelete, 1, 1, 1, 1));
    cmd3.append("APPLY BATCH ");
    execute(cmd3.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 2, 22, 2), row(1, 1, 2, 3, 22, 23));
    // Batch 4: insert + static-conditional TTL(5s) update on row (1,1,1,2); condition sval=22 met.
    StringBuilder cmd4 = new StringBuilder();
    cmd4.append("BEGIN BATCH ");
    cmd4.append(String.format(clusteringInsert, 1, 2, 3, 4, 1234));
    cmd4.append(String.format(clusteringStaticConditionalTTLUpdate, 5, 234, 1, 1, 1, 2, 22));
    cmd4.append("APPLY BATCH ");
    execute(cmd4.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 2, 22, 234), row(1, 1, 2, 3, 22, 23), row(1, 2, 3, 4, 22, 1234));
    // Sleep past the 5s TTL: only the TTL'd val expires (becomes null); the row remains.
    Thread.sleep(6000);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 2, 22, null), row(1, 1, 2, 3, 22, 23), row(1, 2, 3, 4, 22, 1234));
    // Batch 5: range delete of clustering1=2 + conditional update (sval=22 met).
    StringBuilder cmd5 = new StringBuilder();
    cmd5.append("BEGIN BATCH ");
    cmd5.append(String.format(clusteringRangeDelete, 1, 2));
    cmd5.append(String.format(clusteringStaticConditionalUpdate, 1234, 1, 1, 1, 2, 22));
    cmd5.append("APPLY BATCH ");
    execute(cmd5.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 2, 22, 1234), row(1, 1, 2, 3, 22, 23));
    // Batch 6: plain update (new row 3,4,5) + conditional update (sval=22 met).
    StringBuilder cmd6 = new StringBuilder();
    cmd6.append("BEGIN BATCH ");
    cmd6.append(String.format(clusteringUpdate, 345, 1, 3, 4, 5));
    cmd6.append(String.format(clusteringStaticConditionalUpdate, 1, 1, 1, 1, 2, 22));
    cmd6.append("APPLY BATCH ");
    execute(cmd6.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 2, 22, 1), row(1, 1, 2, 3, 22, 23), row(1, 3, 4, 5, 22, 345));
    // Batch 7: condition sval=1 NOT met (sval is 22) -> whole batch rejected; state unchanged.
    StringBuilder cmd7 = new StringBuilder();
    cmd7.append("BEGIN BATCH ");
    cmd7.append(String.format(clusteringDelete, 1, 3, 4, 5));
    cmd7.append(String.format(clusteringStaticConditionalUpdate, 2300, 1, 1, 2, 3, 1));
    cmd7.append("APPLY BATCH ");
    execute(cmd7.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 1, 1, 2, 22, 1), row(1, 1, 2, 3, 22, 23), row(1, 3, 4, 5, 22, 345));
    // Batch 8: conditional delete (val=345 met) + range delete + insert -> only (2,3,4) survives.
    StringBuilder cmd8 = new StringBuilder();
    cmd8.append("BEGIN BATCH ");
    cmd8.append(String.format(clusteringConditionalDelete, 1, 3, 4, 5, 345));
    cmd8.append(String.format(clusteringRangeDelete, 1, 1));
    cmd8.append(String.format(clusteringInsert, 1, 2, 3, 4, 5));
    cmd8.append("APPLY BATCH ");
    execute(cmd8.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 2, 3, 4, 22, 5));
    // Batch 9: IF NOT EXISTS insert (row absent -> applies) + delete of the prior row.
    StringBuilder cmd9 = new StringBuilder();
    cmd9.append("BEGIN BATCH ");
    cmd9.append(String.format(clusteringConditionalInsert, 1, 3, 4, 5, 345));
    cmd9.append(String.format(clusteringDelete, 1, 2, 3, 4));
    cmd9.append("APPLY BATCH ");
    execute(cmd9.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 3, 4, 5, 22, 345));
    // Batch 10: TTL insert (5s) + conditional TTL(10s) update (val=345 met).
    StringBuilder cmd10 = new StringBuilder();
    cmd10.append("BEGIN BATCH ");
    cmd10.append(String.format(clusteringTTLInsert, 1, 2, 3, 4, 5, 5));
    cmd10.append(String.format(clusteringConditionalTTLUpdate, 10, 5, 1, 3, 4, 5, 345));
    cmd10.append("APPLY BATCH ");
    execute(cmd10.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 2, 3, 4, 22, 5), row(1, 3, 4, 5, 22, 5));
    // After 6s the 5s-TTL insert is gone; the 10s-TTL update survives.
    Thread.sleep(6000);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 3, 4, 5, 22, 5));
    // Batch 11: conditional TTL insert (row absent -> applies with 5s TTL) + plain insert.
    StringBuilder cmd11 = new StringBuilder();
    cmd11.append("BEGIN BATCH ");
    cmd11.append(String.format(clusteringConditionalTTLInsert, 1, 2, 3, 4, 5, 5));
    cmd11.append(String.format(clusteringInsert, 1, 4, 5, 6, 7));
    cmd11.append("APPLY BATCH ");
    execute(cmd11.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 2, 3, 4, 22, 5), row(1, 3, 4, 5, 22, 5), row(1, 4, 5, 6, 22, 7));
    // After expiry: the conditional-TTL row disappears; row (3,4,5)'s earlier 10s-TTL val has also expired by now.
    Thread.sleep(6000);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 3, 4, 5, 22, null), row(1, 4, 5, 6, 22, 7));
    // Batch 12: conditional TTL update with IF val=null (met, val expired) + plain TTL update.
    StringBuilder cmd12 = new StringBuilder();
    cmd12.append("BEGIN BATCH ");
    cmd12.append(String.format(clusteringConditionalTTLUpdate, 5, 5, 1, 3, 4, 5, null));
    cmd12.append(String.format(clusteringTTLUpdate, 5, 8, 1, 4, 5, 6));
    cmd12.append("APPLY BATCH ");
    execute(cmd12.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 3, 4, 5, 22, 5), row(1, 4, 5, 6, 22, 8));
    // Both updates used a 5s TTL; after 6s both vals are null again.
    Thread.sleep(6000);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 3, 4, 5, 22, null), row(1, 4, 5, 6, 22, null));
    // Batch 13: static-conditional delete (sval=22 met) removes row (3,4,5); insert adds (2,3,4).
    StringBuilder cmd13 = new StringBuilder();
    cmd13.append("BEGIN BATCH ");
    cmd13.append(String.format(clusteringStaticConditionalDelete, 1, 3, 4, 5, 22));
    cmd13.append(String.format(clusteringInsert, 1, 2, 3, 4, 5));
    cmd13.append("APPLY BATCH ");
    execute(cmd13.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 2, 3, 4, 22, 5), row(1, 4, 5, 6, 22, null));
    // Batch 14: static update conditional on the static column itself (22 -> 23) + delete of (4,5,6).
    StringBuilder cmd14 = new StringBuilder();
    cmd14.append("BEGIN BATCH ");
    cmd14.append(String.format(clusteringStaticConditionalStaticUpdate, 23, 1, 22));
    cmd14.append(String.format(clusteringDelete, 1, 4, 5, 6));
    cmd14.append("APPLY BATCH ");
    execute(cmd14.toString());
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering_static WHERE id=1"), row(1, 2, 3, 4, 23, 5));
}
235301.8515158cassandra
/**
 * Verifies that materialized views with filtering clauses correctly add and
 * remove view rows as the base row drifts in and out of each view's filter.
 *
 * Six views with different restrictions are created over the same base table:
 * mv1 (c = 1, all columns), mv2 (c = 1 AND d = 1), mv3 (no filter beyond
 * NOT NULL keys), mv4 (c = 1, selects c only), mv5 (d = 1, d in the view key),
 * mv6 (a = 1, d in the view key). A single base row (a=1) is then mutated with
 * strictly increasing timestamps; after each mutation the test asserts which
 * views still contain the row. When {@code flush} is true, memtables are
 * flushed after each step so the behavior is also exercised across sstables.
 *
 * @param flush whether to flush (and, at one point, major-compact the views)
 *              between mutations
 */
public void testViewFiltering(boolean flush) throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c int, d int, PRIMARY KEY (a))");
    String mv1 = createView("CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s " + "WHERE a IS NOT NULL AND b IS NOT NULL and c = 1  PRIMARY KEY (a, b)");
    String mv2 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c, d FROM %s " + "WHERE a IS NOT NULL AND b IS NOT NULL and c = 1 and d = 1 PRIMARY KEY (a, b)");
    // NOTE(review): mv3 uses "%%s" where the other views use "%s" for the base-table
    // placeholder — looks inconsistent; confirm against the createView helper's formatting.
    String mv3 = createView("CREATE MATERIALIZED VIEW %s AS SELECT a, b, c, d FROM %%s " + "WHERE a IS NOT NULL AND b IS NOT NULL PRIMARY KEY (a, b)");
    String mv4 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c FROM %s " + "WHERE a IS NOT NULL AND b IS NOT NULL and c = 1 PRIMARY KEY (a, b)");
    String mv5 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c FROM %s " + "WHERE a IS NOT NULL and d = 1 PRIMARY KEY (a, d)");
    String mv6 = createView("CREATE MATERIALIZED VIEW %s AS SELECT c FROM %s " + "WHERE a = 1 and d IS NOT NULL PRIMARY KEY (a, d)");
    // Disable auto-compaction so flushes produce predictable sstable layouts.
    Keyspace ks = Keyspace.open(keyspace());
    ks.getColumnFamilyStore(mv1).disableAutoCompaction();
    ks.getColumnFamilyStore(mv2).disableAutoCompaction();
    ks.getColumnFamilyStore(mv3).disableAutoCompaction();
    ks.getColumnFamilyStore(mv4).disableAutoCompaction();
    ks.getColumnFamilyStore(mv5).disableAutoCompaction();
    ks.getColumnFamilyStore(mv6).disableAutoCompaction();
    // ts 0: base row (1,1,1,1) matches every view's filter.
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) using timestamp 0", 1, 1, 1, 1);
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
    // ts 1: c=0 -> row drops out of the c=1 views (mv1, mv2, mv4) only.
    updateView("UPDATE %s using timestamp 1 set c = ? WHERE a=?", 0, 1);
    if (flush)
        Util.flush(ks);
    assertRowCount(execute("SELECT * FROM " + mv1), 0);
    assertRowCount(execute("SELECT * FROM " + mv2), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 0, 1));
    assertRowCount(execute("SELECT * FROM " + mv4), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 0));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 0));
    // ts 2: c back to 1 -> the row re-enters the c=1 views.
    updateView("UPDATE %s using timestamp 2 set c = ? WHERE a=?", 1, 1);
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
    // ts 3: d=0 -> drops out of the d=1 views (mv2, mv5); mv6 keys on d so its key changes.
    updateView("UPDATE %s using timestamp 3 set d = ? WHERE a=?", 0, 1);
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 0));
    assertRowCount(execute("SELECT * FROM " + mv2), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 0));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
    assertRowCount(execute("SELECT * FROM " + mv5), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 0, 1));
    // ts 4: c=0 with d still 0 -> only the unfiltered mv3 and a=1-keyed mv6 keep the row.
    updateView("UPDATE %s using timestamp 4 set c = ? WHERE a=?", 0, 1);
    if (flush)
        Util.flush(ks);
    assertRowCount(execute("SELECT * FROM " + mv1), 0);
    assertRowCount(execute("SELECT * FROM " + mv2), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 0, 0));
    assertRowCount(execute("SELECT * FROM " + mv4), 0);
    assertRowCount(execute("SELECT * FROM " + mv5), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 0, 0));
    // ts 5: d=1 -> mv5 re-gains the row; c is still 0 so the c=1 views stay empty.
    updateView("UPDATE %s using timestamp 5 set d = ? WHERE a=?", 1, 1);
    if (flush)
        Util.flush(ks);
    assertRowCount(execute("SELECT * FROM " + mv1), 0);
    assertRowCount(execute("SELECT * FROM " + mv2), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 0, 1));
    assertRowCount(execute("SELECT * FROM " + mv4), 0);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 0));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 0));
    // ts 6: c=1 -> back in all views (no flush here; asserted directly from memtable).
    updateView("UPDATE %s using timestamp 6 set c = ? WHERE a=?", 1, 1);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
    // ts 7: b=2 -> views keyed on b (mv1..mv4) show the new key; also exercises major compaction.
    updateView("UPDATE %s using timestamp 7 set b = ? WHERE a=?", 2, 1);
    if (flush) {
        Util.flush(ks);
        for (String view : getViews()) ks.getColumnFamilyStore(view).forceMajorCompaction();
    }
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 2, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 2, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 2, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 2, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
    // ts 6 (older than b's ts 7): cell deletes for b and c. c becomes null so the
    // b-keyed views with c=1 filters empty out; b=2 (ts 7) survives the ts-6 delete.
    updateView("DELETE b, c FROM %s using timestamp 6 WHERE a=?", 1);
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(execute("SELECT * FROM %s"), row(1, 2, null, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 2, null, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, null));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, null));
    // ts 8: partition delete -> every view empties.
    updateView("DELETE FROM %s using timestamp 8 where a=?", 1);
    if (flush)
        Util.flush(ks);
    assertRowCount(execute("SELECT * FROM " + mv1), 0);
    assertRowCount(execute("SELECT * FROM " + mv2), 0);
    assertRowCount(execute("SELECT * FROM " + mv3), 0);
    assertRowCount(execute("SELECT * FROM " + mv4), 0);
    assertRowCount(execute("SELECT * FROM " + mv5), 0);
    assertRowCount(execute("SELECT * FROM " + mv6), 0);
    // ts 9: resurrect b and c only (d stays null) -> only views not requiring d get the row.
    updateView("UPDATE %s using timestamp 9 set b = ?,c = ? where a=?", 1, 1, 1);
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, null));
    assertRows(execute("SELECT * FROM " + mv2));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, null));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
    assertRows(execute("SELECT * FROM " + mv5));
    assertRows(execute("SELECT * FROM " + mv6));
    // ts 10: delete again -> all views empty.
    updateView("DELETE FROM %s using timestamp 10 where a=?", 1);
    if (flush)
        Util.flush(ks);
    assertRowCount(execute("SELECT * FROM " + mv1), 0);
    assertRowCount(execute("SELECT * FROM " + mv2), 0);
    assertRowCount(execute("SELECT * FROM " + mv3), 0);
    assertRowCount(execute("SELECT * FROM " + mv4), 0);
    assertRowCount(execute("SELECT * FROM " + mv5), 0);
    assertRowCount(execute("SELECT * FROM " + mv6), 0);
    // ts 11: full re-insert -> all views populated again.
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?) using timestamp 11", 1, 1, 1, 1);
    if (flush)
        Util.flush(ks);
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv1), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv2), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv3), row(1, 1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv4), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv5), row(1, 1, 1));
    assertRowsIgnoringOrder(execute("SELECT * FROM " + mv6), row(1, 1, 1));
    // ts 12: final delete, then tear everything down.
    updateView("DELETE FROM %s using timestamp 12 where a=?", 1);
    if (flush)
        Util.flush(ks);
    assertRowCount(execute("SELECT * FROM " + mv1), 0);
    assertRowCount(execute("SELECT * FROM " + mv2), 0);
    assertRowCount(execute("SELECT * FROM " + mv3), 0);
    assertRowCount(execute("SELECT * FROM " + mv4), 0);
    assertRowCount(execute("SELECT * FROM " + mv5), 0);
    assertRowCount(execute("SELECT * FROM " + mv6), 0);
    dropView(mv1);
    dropView(mv2);
    dropView(mv3);
    dropView(mv4);
    dropView(mv5);
    dropView(mv6);
    dropTable("DROP TABLE %s");
}
236396.883181elasticsearch
/**
 * Verifies the XContent (JSON) representation of a get-data-streams Response
 * when backing indices are managed by a mix of ILM and the data stream
 * lifecycle.
 *
 * The test serializes a Response via {@code toXContent}, re-parses the JSON
 * into a generic map, and asserts on the rendered fields — it never inspects
 * the Response object directly. Two scenarios are covered:
 * (1) lifecycle enabled: the first-generation index is ILM-managed, the rest
 *     lifecycle-managed, so {@code next_generation_managed_by} is LIFECYCLE;
 * (2) lifecycle disabled: ILM manages what it covers and the write index is
 *     unmanaged, so {@code next_generation_managed_by} is UNMANAGED.
 * Failure-store assertions are gated on the failure-store feature flag.
 */
public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Exception {
    // Shared fixture: a 3-generation data stream plus one failure-store index.
    String dataStreamName = "logs";
    Index firstGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 1), UUIDs.base64UUID());
    Index secondGenerationIndex = new Index(getDefaultBackingIndexName(dataStreamName, 2), UUIDs.base64UUID());
    Index writeIndex = new Index(getDefaultBackingIndexName(dataStreamName, 3), UUIDs.base64UUID());
    Index failureStoreIndex = new Index(getDefaultFailureStoreName(dataStreamName, 1, System.currentTimeMillis()), UUIDs.base64UUID());
    List<Index> indices = List.of(firstGenerationIndex, secondGenerationIndex, writeIndex);
    List<Index> failureStores = List.of(failureStoreIndex);
    {
        // Scenario 1: data stream lifecycle enabled (default DataStreamLifecycle).
        DataStream logs = DataStream.builder("logs", indices).setGeneration(3).setAllowCustomRouting(true).setIndexMode(IndexMode.STANDARD).setLifecycle(new DataStreamLifecycle()).setFailureStoreEnabled(true).setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()).build();
        String ilmPolicyName = "rollover-30days";
        // Per-index properties: gen-1 prefers ILM with a policy; gen-2 has the policy but is
        // lifecycle-managed; write and failure-store indices have no policy (lifecycle-managed).
        Map<Index, Response.IndexProperties> indexSettingsValues = Map.of(firstGenerationIndex, new Response.IndexProperties(true, ilmPolicyName, ManagedBy.ILM), secondGenerationIndex, new Response.IndexProperties(false, ilmPolicyName, ManagedBy.LIFECYCLE), writeIndex, new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE), failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.LIFECYCLE));
        Response.DataStreamInfo dataStreamInfo = new Response.DataStreamInfo(logs, ClusterHealthStatus.GREEN, "index-template", null, null, indexSettingsValues, false);
        Response response = new Response(List.of(dataStreamInfo));
        // Serialize to JSON, then re-parse into a generic map for assertions.
        XContentBuilder contentBuilder = XContentFactory.jsonBuilder();
        response.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS);
        BytesReference bytes = BytesReference.bytes(contentBuilder);
        try (XContentParser parser = createParser(JsonXContent.jsonXContent, bytes)) {
            Map<String, Object> map = parser.map();
            List<Object> dataStreams = (List<Object>) map.get(Response.DATA_STREAMS_FIELD.getPreferredName());
            assertThat(dataStreams.size(), is(1));
            // Top-level data stream fields: no stream-level ILM policy, lifecycle enabled,
            // and the next generation would be lifecycle-managed (write index has no policy).
            Map<String, Object> dataStreamMap = (Map<String, Object>) dataStreams.get(0);
            assertThat(dataStreamMap.get(DataStream.NAME_FIELD.getPreferredName()), is(dataStreamName));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.LIFECYCLE_FIELD.getPreferredName()), is(Map.of("enabled", true)));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.NEXT_GENERATION_INDEX_MANAGED_BY.getPreferredName()), is(ManagedBy.LIFECYCLE.displayValue));
            // Each backing index renders its own prefer_ilm / ilm_policy / managed_by.
            List<Object> indicesRepresentation = (List<Object>) dataStreamMap.get(DataStream.INDICES_FIELD.getPreferredName());
            Map<String, Object> firstGenIndexRepresentation = (Map<String, Object>) indicesRepresentation.get(0);
            assertThat(firstGenIndexRepresentation.get("index_name"), is(firstGenerationIndex.getName()));
            assertThat(firstGenIndexRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(true));
            assertThat(firstGenIndexRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(ilmPolicyName));
            assertThat(firstGenIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.ILM.displayValue));
            Map<String, Object> secondGenIndexRepresentation = (Map<String, Object>) indicesRepresentation.get(1);
            assertThat(secondGenIndexRepresentation.get("index_name"), is(secondGenerationIndex.getName()));
            assertThat(secondGenIndexRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
            assertThat(secondGenIndexRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(ilmPolicyName));
            assertThat(secondGenIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.LIFECYCLE.displayValue));
            Map<String, Object> writeIndexRepresentation = (Map<String, Object>) indicesRepresentation.get(2);
            assertThat(writeIndexRepresentation.get("index_name"), is(writeIndex.getName()));
            assertThat(writeIndexRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
            assertThat(writeIndexRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
            assertThat(writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.LIFECYCLE.displayValue));
            // Failure-store index rendering is only present behind the feature flag.
            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
                var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
                List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
                Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
                assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
                assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
                assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
                assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.LIFECYCLE.displayValue));
            }
        }
    }
    {
        // Scenario 2: lifecycle explicitly disabled (new DataStreamLifecycle(null, null, false));
        // ILM covers generations 1-2, the write and failure-store indices are unmanaged.
        DataStream logs = DataStream.builder("logs", indices).setGeneration(3).setAllowCustomRouting(true).setIndexMode(IndexMode.STANDARD).setLifecycle(new DataStreamLifecycle(null, null, false)).setFailureStoreEnabled(true).setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()).build();
        String ilmPolicyName = "rollover-30days";
        Map<Index, Response.IndexProperties> indexSettingsValues = Map.of(firstGenerationIndex, new Response.IndexProperties(true, ilmPolicyName, ManagedBy.ILM), secondGenerationIndex, new Response.IndexProperties(true, ilmPolicyName, ManagedBy.ILM), writeIndex, new Response.IndexProperties(false, null, ManagedBy.UNMANAGED), failureStoreIndex, new Response.IndexProperties(false, null, ManagedBy.UNMANAGED));
        Response.DataStreamInfo dataStreamInfo = new Response.DataStreamInfo(logs, ClusterHealthStatus.GREEN, "index-template", null, null, indexSettingsValues, false);
        Response response = new Response(List.of(dataStreamInfo));
        XContentBuilder contentBuilder = XContentFactory.jsonBuilder();
        response.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS);
        BytesReference bytes = BytesReference.bytes(contentBuilder);
        try (XContentParser parser = createParser(JsonXContent.jsonXContent, bytes)) {
            Map<String, Object> map = parser.map();
            List<Object> dataStreams = (List<Object>) map.get(Response.DATA_STREAMS_FIELD.getPreferredName());
            assertThat(dataStreams.size(), is(1));
            // Lifecycle now renders as disabled and the next generation is unmanaged.
            Map<String, Object> dataStreamMap = (Map<String, Object>) dataStreams.get(0);
            assertThat(dataStreamMap.get(DataStream.NAME_FIELD.getPreferredName()), is(dataStreamName));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.LIFECYCLE_FIELD.getPreferredName()), is(Map.of("enabled", false)));
            assertThat(dataStreamMap.get(Response.DataStreamInfo.NEXT_GENERATION_INDEX_MANAGED_BY.getPreferredName()), is(ManagedBy.UNMANAGED.displayValue));
            List<Object> indicesRepresentation = (List<Object>) dataStreamMap.get(DataStream.INDICES_FIELD.getPreferredName());
            Map<String, Object> firstGenIndexRepresentation = (Map<String, Object>) indicesRepresentation.get(0);
            assertThat(firstGenIndexRepresentation.get("index_name"), is(firstGenerationIndex.getName()));
            assertThat(firstGenIndexRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(true));
            assertThat(firstGenIndexRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(ilmPolicyName));
            assertThat(firstGenIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.ILM.displayValue));
            // (index 1, the second generation, is not re-asserted in this scenario)
            Map<String, Object> writeIndexRepresentation = (Map<String, Object>) indicesRepresentation.get(2);
            assertThat(writeIndexRepresentation.get("index_name"), is(writeIndex.getName()));
            assertThat(writeIndexRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
            assertThat(writeIndexRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
            assertThat(writeIndexRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.UNMANAGED.displayValue));
            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
                var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
                List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
                Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
                assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
                assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
                assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
                assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()), is(ManagedBy.UNMANAGED.displayValue));
            }
        }
    }
}
235064.151201elasticsearch
/**
 * Parses a GeoJSON MultiPolygon document and verifies both representations produced by the
 * parser: the legacy JTS-backed {@code Shape} (third argument {@code true}) and the newer
 * {@code org.elasticsearch.geometry} model (third argument {@code false}). The first document
 * contains two polygons — one plain shell and one shell with an interior hole; the second
 * document exercises the single-polygon-with-hole case on its own.
 */
public void testParseMultiPolygon() throws IOException, ParseException {
    // GeoJSON: polygon A spans (102..103, 2..3) with no hole; polygon B spans (100..101, 0..1)
    // with an interior ring at (100.2..100.8, 0.2..0.8).
    XContentBuilder multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon").startArray("coordinates").startArray().startArray().startArray().value(102.0).value(2.0).endArray().startArray().value(103.0).value(2.0).endArray().startArray().value(103.0).value(3.0).endArray().startArray().value(102.0).value(3.0).endArray().startArray().value(102.0).value(2.0).endArray().endArray().endArray().startArray().startArray().startArray().value(100.0).value(0.0).endArray().startArray().value(101.0).value(0.0).endArray().startArray().value(101.0).value(1.0).endArray().startArray().value(100.0).value(1.0).endArray().startArray().value(100.0).value(0.0).endArray().endArray().startArray().startArray().value(100.2).value(0.8).endArray().startArray().value(100.2).value(0.2).endArray().startArray().value(100.8).value(0.2).endArray().startArray().value(100.8).value(0.8).endArray().startArray().value(100.2).value(0.8).endArray().endArray().endArray().endArray().endObject();
    // Expected JTS shell for polygon B (the one that carries the hole).
    List<Coordinate> shellCoordinates = new ArrayList<>();
    shellCoordinates.add(new Coordinate(100, 0));
    shellCoordinates.add(new Coordinate(101, 0));
    shellCoordinates.add(new Coordinate(101, 1));
    shellCoordinates.add(new Coordinate(100, 1));
    shellCoordinates.add(new Coordinate(100, 0));
    // Expected JTS interior ring of polygon B.
    List<Coordinate> holeCoordinates = new ArrayList<>();
    holeCoordinates.add(new Coordinate(100.2, 0.2));
    holeCoordinates.add(new Coordinate(100.8, 0.2));
    holeCoordinates.add(new Coordinate(100.8, 0.8));
    holeCoordinates.add(new Coordinate(100.2, 0.8));
    holeCoordinates.add(new Coordinate(100.2, 0.2));
    LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
    LinearRing[] holes = new LinearRing[1];
    holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
    Polygon withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes);
    // Expected JTS shell for polygon A (no hole); note the ring is listed in the opposite
    // direction from the GeoJSON document.
    shellCoordinates = new ArrayList<>();
    shellCoordinates.add(new Coordinate(102, 3));
    shellCoordinates.add(new Coordinate(103, 3));
    shellCoordinates.add(new Coordinate(103, 2));
    shellCoordinates.add(new Coordinate(102, 2));
    shellCoordinates.add(new Coordinate(102, 3));
    shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
    Polygon withoutHoles = GEOMETRY_FACTORY.createPolygon(shell, null);
    Shape expected = shapeCollection(withoutHoles, withHoles);
    // Legacy JTS-backed parsing path.
    assertGeometryEquals(expected, multiPolygonGeoJson, true);
    // Same document checked against the org.elasticsearch.geometry model.
    org.elasticsearch.geometry.LinearRing hole = new org.elasticsearch.geometry.LinearRing(new double[] { 100.8d, 100.8d, 100.2d, 100.2d, 100.8d }, new double[] { 0.8d, 0.2d, 0.2d, 0.8d, 0.8d });
    org.elasticsearch.geometry.MultiPolygon polygons = new org.elasticsearch.geometry.MultiPolygon(Arrays.asList(new org.elasticsearch.geometry.Polygon(new org.elasticsearch.geometry.LinearRing(new double[] { 103d, 103d, 102d, 102d, 103d }, new double[] { 2d, 3d, 3d, 2d, 2d })), new org.elasticsearch.geometry.Polygon(new org.elasticsearch.geometry.LinearRing(new double[] { 101d, 101d, 100d, 100d, 101d }, new double[] { 0d, 1d, 1d, 0d, 0d }), Collections.singletonList(hole))));
    assertGeometryEquals(polygons, multiPolygonGeoJson, false);
    // Second document: a MultiPolygon containing a single polygon with one hole.
    multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon").startArray("coordinates").startArray().startArray().startArray().value(100.0).value(1.0).endArray().startArray().value(101.0).value(1.0).endArray().startArray().value(101.0).value(0.0).endArray().startArray().value(100.0).value(0.0).endArray().startArray().value(100.0).value(1.0).endArray().endArray().startArray().startArray().value(100.2).value(0.8).endArray().startArray().value(100.2).value(0.2).endArray().startArray().value(100.8).value(0.2).endArray().startArray().value(100.8).value(0.8).endArray().startArray().value(100.2).value(0.8).endArray().endArray().endArray().endArray().endObject();
    shellCoordinates = new ArrayList<>();
    shellCoordinates.add(new Coordinate(100, 1));
    shellCoordinates.add(new Coordinate(101, 1));
    shellCoordinates.add(new Coordinate(101, 0));
    shellCoordinates.add(new Coordinate(100, 0));
    shellCoordinates.add(new Coordinate(100, 1));
    holeCoordinates = new ArrayList<>();
    holeCoordinates.add(new Coordinate(100.2, 0.8));
    holeCoordinates.add(new Coordinate(100.2, 0.2));
    holeCoordinates.add(new Coordinate(100.8, 0.2));
    holeCoordinates.add(new Coordinate(100.8, 0.8));
    holeCoordinates.add(new Coordinate(100.2, 0.8));
    shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
    holes = new LinearRing[1];
    holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
    withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes);
    assertGeometryEquals(jtsGeom(withHoles), multiPolygonGeoJson, true);
    // And the equivalent expectation in the org.elasticsearch.geometry model.
    org.elasticsearch.geometry.LinearRing luceneHole = new org.elasticsearch.geometry.LinearRing(new double[] { 100.8d, 100.8d, 100.2d, 100.2d, 100.8d }, new double[] { 0.8d, 0.2d, 0.2d, 0.8d, 0.8d });
    org.elasticsearch.geometry.Polygon lucenePolygons = (new org.elasticsearch.geometry.Polygon(new org.elasticsearch.geometry.LinearRing(new double[] { 100d, 101d, 101d, 100d, 100d }, new double[] { 0d, 0d, 1d, 1d, 0d }), Collections.singletonList(luceneHole)));
    assertGeometryEquals(lucenePolygons, multiPolygonGeoJson, false);
}
234711.4314170elasticsearch
/**
 * Verifies the chunked XContent rendering of {@link Response} in three scenarios:
 * (1) no rollover configuration and no global retention — the serialized {@code lifecycle}
 * object carries only its enabled flag; (2) a {@link RolloverConfiguration} is present, so its
 * conditions must appear under the serialized {@code lifecycle}; (3) an explain entry with a
 * null generation date must render {@code generation_time} as null.
 */
public void testToXContent() throws IOException {
    long now = System.currentTimeMillis();
    DataStreamLifecycle lifecycle = new DataStreamLifecycle();
    ExplainIndexDataStreamLifecycle explainIndex = createRandomIndexDataStreamLifecycleExplanation(now, lifecycle);
    explainIndex.setNowSupplier(() -> now);
    {
        // Scenario 1: no rollover configuration, no global retention.
        Response response = new Response(List.of(explainIndex), null, null);
        XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
        renderChunkedResponse(response, builder);
        Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
        Map<String, Object> indices = (Map<String, Object>) xContentMap.get("indices");
        assertThat(indices.size(), is(1));
        Map<String, Object> explainIndexMap = (Map<String, Object>) indices.get(explainIndex.getIndex());
        assertThat(explainIndexMap.get("managed_by_lifecycle"), is(explainIndex.isManagedByLifecycle()));
        if (explainIndex.isManagedByLifecycle()) {
            assertThat(explainIndexMap.get("index_creation_date_millis"), is(explainIndex.getIndexCreationDate()));
            assertThat(explainIndexMap.get("time_since_index_creation"), is(explainIndex.getTimeSinceIndexCreation(() -> now).toHumanReadableString(2)));
            if (explainIndex.getRolloverDate() != null) {
                assertThat(explainIndexMap.get("rollover_date_millis"), is(explainIndex.getRolloverDate()));
                assertThat(explainIndexMap.get("time_since_rollover"), is(explainIndex.getTimeSinceRollover(() -> now).toHumanReadableString(2)));
            }
            if (explainIndex.getGenerationTime(() -> now) != null) {
                assertThat(explainIndexMap.get("generation_time"), is(explainIndex.getGenerationTime(() -> now).toHumanReadableString(2)));
            } else {
                assertThat(explainIndexMap.get("generation_time"), is(nullValue()));
            }
            // Without a rollover configuration the lifecycle renders only its enabled flag.
            assertThat(explainIndexMap.get("lifecycle"), is(Map.of("enabled", true)));
            if (explainIndex.getError() != null) {
                Map<String, Object> errorObject = (Map<String, Object>) explainIndexMap.get("error");
                assertThat(errorObject.get(ErrorEntry.MESSAGE_FIELD.getPreferredName()), is(explainIndex.getError().error()));
                assertThat(errorObject.get(ErrorEntry.FIRST_OCCURRENCE_MILLIS_FIELD.getPreferredName()), is(explainIndex.getError().firstOccurrenceTimestamp()));
                assertThat(errorObject.get(ErrorEntry.LAST_RECORDED_MILLIS_FIELD.getPreferredName()), is(explainIndex.getError().recordedTimestamp()));
                assertThat(errorObject.get(ErrorEntry.RETRY_COUNT_FIELD.getPreferredName()), is(explainIndex.getError().retryCount()));
            } else {
                assertThat(explainIndexMap.get("error"), is(nullValue()));
            }
        }
    }
    {
        // Scenario 2: a rollover configuration is supplied, so its conditions must show up
        // under the index's serialized "lifecycle" object.
        RolloverConditions rolloverConditions = new RolloverConditions(Map.of(MaxPrimaryShardDocsCondition.NAME, new MaxPrimaryShardDocsCondition(9L), MinPrimaryShardDocsCondition.NAME, new MinPrimaryShardDocsCondition(4L)));
        Response response = new Response(List.of(explainIndex), new RolloverConfiguration(rolloverConditions), DataStreamTestHelper.randomGlobalRetention());
        XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
        renderChunkedResponse(response, builder);
        Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
        Map<String, Object> indices = (Map<String, Object>) xContentMap.get("indices");
        assertThat(indices.size(), is(1));
        Map<String, Object> explainIndexMap = (Map<String, Object>) indices.get(explainIndex.getIndex());
        assertThat(explainIndexMap.get("index"), is(explainIndex.getIndex()));
        assertThat(explainIndexMap.get("managed_by_lifecycle"), is(explainIndex.isManagedByLifecycle()));
        if (explainIndex.isManagedByLifecycle()) {
            assertThat(explainIndexMap.get("index_creation_date_millis"), is(explainIndex.getIndexCreationDate()));
            assertThat(explainIndexMap.get("time_since_index_creation"), is(explainIndex.getTimeSinceIndexCreation(() -> now).toHumanReadableString(2)));
            if (explainIndex.getRolloverDate() != null) {
                assertThat(explainIndexMap.get("rollover_date_millis"), is(explainIndex.getRolloverDate()));
                assertThat(explainIndexMap.get("time_since_rollover"), is(explainIndex.getTimeSinceRollover(() -> now).toHumanReadableString(2)));
            }
            if (explainIndex.getGenerationTime(() -> now) != null) {
                assertThat(explainIndexMap.get("generation_time"), is(explainIndex.getGenerationTime(() -> now).toHumanReadableString(2)));
            } else {
                assertThat(explainIndexMap.get("generation_time"), is(nullValue()));
            }
            if (explainIndex.getError() != null) {
                Map<String, Object> errorObject = (Map<String, Object>) explainIndexMap.get("error");
                assertThat(errorObject.get(ErrorEntry.MESSAGE_FIELD.getPreferredName()), is(explainIndex.getError().error()));
                assertThat(errorObject.get(ErrorEntry.FIRST_OCCURRENCE_MILLIS_FIELD.getPreferredName()), is(explainIndex.getError().firstOccurrenceTimestamp()));
                assertThat(errorObject.get(ErrorEntry.LAST_RECORDED_MILLIS_FIELD.getPreferredName()), is(explainIndex.getError().recordedTimestamp()));
                assertThat(errorObject.get(ErrorEntry.RETRY_COUNT_FIELD.getPreferredName()), is(explainIndex.getError().retryCount()));
            } else {
                assertThat(explainIndexMap.get("error"), is(nullValue()));
            }
            Map<String, Object> lifecycleMap = (Map<String, Object>) explainIndexMap.get("lifecycle");
            assertThat(lifecycleMap.get("data_retention"), nullValue());
            Map<String, Object> lifecycleRollover = (Map<String, Object>) lifecycleMap.get("rollover");
            // convertToMap round-trips JSON numbers as Integer, hence the unboxed expectations.
            assertThat(lifecycleRollover.get("min_primary_shard_docs"), is(4));
            assertThat(lifecycleRollover.get("max_primary_shard_docs"), is(9));
        }
    }
    {
        // Scenario 3: a managed index whose generation date is null must serialize a null
        // "generation_time".
        String index = randomAlphaOfLengthBetween(10, 30);
        ExplainIndexDataStreamLifecycle explainIndexWithNullGenerationDate = new ExplainIndexDataStreamLifecycle(index, true, randomBoolean(), now, randomBoolean() ? now + TimeValue.timeValueDays(1).getMillis() : null, null, lifecycle, randomBoolean() ? new ErrorEntry(System.currentTimeMillis(), new NullPointerException("bad times").getMessage(), System.currentTimeMillis(), randomIntBetween(0, 30)) : null);
        Response response = new Response(List.of(explainIndexWithNullGenerationDate), null, null);
        XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
        renderChunkedResponse(response, builder);
        Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
        Map<String, Object> indices = (Map<String, Object>) xContentMap.get("indices");
        assertThat(indices.size(), is(1));
        Map<String, Object> explainIndexMap = (Map<String, Object>) indices.get(explainIndexWithNullGenerationDate.getIndex());
        assertThat(explainIndexMap.get("managed_by_lifecycle"), is(true));
        assertThat(explainIndexMap.get("generation_time"), is(nullValue()));
    }
}

/**
 * Drains the chunked XContent of {@code response} into {@code builder}, failing the test if
 * writing any chunk throws an {@link IOException}. Extracted because the identical lambda
 * previously appeared three times in {@link #testToXContent()}.
 */
private void renderChunkedResponse(Response response, XContentBuilder builder) {
    response.toXContentChunked(EMPTY_PARAMS).forEachRemaining(xcontent -> {
        try {
            xcontent.toXContent(builder, EMPTY_PARAMS);
        } catch (IOException e) {
            logger.error(e.getMessage(), e);
            fail(e.getMessage());
        }
    });
}
231747.5816237elasticsearch
/**
 * Exercises {@code DiskThresholdMonitor} around node-replacement shutdowns: while a node is
 * being replaced, an index that is already marked read-only/allow-delete must NOT be
 * auto-released even when disk usage recovers; once the shutdown metadata is removed, the
 * same disk recovery must release the block. Runs with either percentage-based thresholds
 * ({@code testMaxHeadroom == false}, 100-byte disks) or max-headroom thresholds
 * ({@code testMaxHeadroom == true}, 10000 GB disks).
 */
private void doTestNoAutoReleaseOfIndicesOnReplacementNodes(boolean testMaxHeadroom) {
    // Captured by the overridden updateIndicesReadOnly below; reset to null before each
    // onNewInfo call so each phase observes exactly one update.
    AtomicReference<Set<String>> indicesToMarkReadOnly = new AtomicReference<>();
    AtomicReference<Set<String>> indicesToRelease = new AtomicReference<>();
    AtomicReference<ClusterState> currentClusterState = new AtomicReference<>();
    AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
    // Two indices, each 2 shards x 1 replica = 8 started shard copies across two nodes.
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test_1").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(1)).put(IndexMetadata.builder("test_2").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(1)).build();
    RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test_1")).addAsNew(metadata.index("test_2")).build();
    final ClusterState clusterState = applyStartedShardsUntilNoChange(ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).nodes(DiscoveryNodes.builder().add(newNormalNode("node1", "my-node1")).add(newNormalNode("node2", "my-node2"))).build(), allocation);
    assertThat(RoutingNodesHelper.shardsWithState(clusterState.getRoutingNodes(), ShardRoutingState.STARTED).size(), equalTo(8));
    final long totalBytes = testMaxHeadroom ? ByteSizeValue.ofGb(10000).getBytes() : 100;
    Map<ClusterInfo.NodeAndPath, ClusterInfo.ReservedSpace> reservedSpaces = new HashMap<>();
    final long reservedSpaceNode1 = testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 150)).getBytes() : between(0, 10);
    reservedSpaces.put(new ClusterInfo.NodeAndPath("node1", "/foo/bar"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), reservedSpaceNode1).build());
    final long reservedSpaceNode2 = testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 150)).getBytes() : between(0, 10);
    reservedSpaces.put(new ClusterInfo.NodeAndPath("node2", "/foo/bar"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), reservedSpaceNode2).build());
    currentClusterState.set(clusterState);
    final DiskThresholdMonitor monitor = new DiskThresholdMonitor(Settings.EMPTY, currentClusterState::get, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, () -> 0L, (reason, priority, listener) -> {
        assertNotNull(listener);
        assertThat(priority, equalTo(Priority.HIGH));
        listener.onResponse(null);
    }) {

        @Override
        protected void updateIndicesReadOnly(Set<String> indicesToUpdate, Releasable onCompletion, boolean readOnly) {
            // Record the update instead of applying it; compareAndSet(null, ...) also asserts
            // that at most one update of each kind happens per onNewInfo call.
            if (readOnly) {
                assertTrue(indicesToMarkReadOnly.compareAndSet(null, indicesToUpdate));
            } else {
                assertTrue(indicesToRelease.compareAndSet(null, indicesToUpdate));
            }
            onCompletion.close();
        }
    };
    // Phase 1: both nodes above the flood-stage threshold -> both indices marked read-only.
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    Map<String, DiskUsage> builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 99)).getBytes() : between(0, 4)));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 99)).getBytes() : between(0, 4)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indicesToMarkReadOnly.get());
    assertNull(indicesToRelease.get());
    // Phase 2: disk recovers but no index carries the read-only block yet -> no updates.
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(100, 9850)).getBytes() : between(5, 90)));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(100, 9850)).getBytes() : between(5, 90)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertNull(indicesToMarkReadOnly.get());
    assertNull(indicesToRelease.get());
    // Mark test_2 read-only/allow-delete, then register a REPLACE-type shutdown for a random
    // source node (targeting the other node's name).
    IndexMetadata indexMetadata = IndexMetadata.builder(clusterState.metadata().index("test_2")).settings(Settings.builder().put(clusterState.metadata().index("test_2").getSettings()).put(IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true)).build();
    final String sourceNode;
    final String targetNode;
    if (randomBoolean()) {
        sourceNode = "node1";
        targetNode = "my-node2";
    } else {
        sourceNode = "node2";
        targetNode = "my-node1";
    }
    final ClusterState clusterStateWithBlocks = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(indexMetadata, true).putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(Collections.singletonMap(sourceNode, SingleNodeShutdownMetadata.builder().setNodeId(sourceNode).setReason("testing").setType(SingleNodeShutdownMetadata.Type.REPLACE).setTargetNodeName(targetNode).setStartedAtMillis(randomNonNegativeLong()).build()))).build()).blocks(ClusterBlocks.builder().addBlocks(indexMetadata).build()).build();
    assertTrue(clusterStateWithBlocks.blocks().indexBlocked(ClusterBlockLevel.WRITE, "test_2"));
    currentClusterState.set(clusterStateWithBlocks);
    // Phase 3: node2 is over flood stage -> only test_1 newly marked; blocked test_2 is
    // untouched and nothing is released while the replacement is in progress.
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 10000)).getBytes() : between(0, 100)));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 99)).getBytes() : between(0, 4)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertThat(indicesToMarkReadOnly.get(), contains("test_1"));
    assertNull(indicesToRelease.get());
    // Phase 4: both nodes healthy, but the REPLACE shutdown is still present -> test_2's
    // block must NOT be auto-released (the core behavior under test).
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(150, 10000)).getBytes() : between(10, 100)));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(150, 10000)).getBytes() : between(10, 100)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertNull(indicesToMarkReadOnly.get());
    assertNull(indicesToRelease.get());
    // Phase 5: remove the shutdown metadata; with healthy disks, test_2 is now released.
    final ClusterState clusterStateNoShutdown = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(indexMetadata, true).removeCustom(NodesShutdownMetadata.TYPE).build()).blocks(ClusterBlocks.builder().addBlocks(indexMetadata).build()).build();
    assertTrue(clusterStateNoShutdown.blocks().indexBlocked(ClusterBlockLevel.WRITE, "test_2"));
    currentClusterState.set(clusterStateNoShutdown);
    indicesToMarkReadOnly.set(null);
    indicesToRelease.set(null);
    builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(150, 10000)).getBytes() : between(10, 100)));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(150, 10000)).getBytes() : between(10, 100)));
    monitor.onNewInfo(clusterInfo(builder, reservedSpaces));
    assertNull(indicesToMarkReadOnly.get());
    assertThat(indicesToRelease.get(), contains("test_2"));
}
233923.6128144elasticsearch
/**
 * Builds a randomized {@link NodeInfo} fixture. Every optional sub-info (OS, process, JVM,
 * thread pools, transport, HTTP, remote-cluster server, plugins, ingest, aggregations,
 * indexing buffer) is independently either null or randomly populated, so serialization
 * round-trips are exercised with all combinations of present/absent sections.
 */
private static NodeInfo createNodeInfo() {
    // 0-5 random component-name -> version entries.
    Map<String, Integer> componentVersions = IntStream.range(0, randomInt(5)).boxed().collect(Collectors.toUnmodifiableMap(i -> randomAlphaOfLength(10), i -> randomInt(Integer.MAX_VALUE)));
    Build build = Build.current();
    DiscoveryNode node = DiscoveryNodeUtils.builder("test_node").roles(emptySet()).version(VersionUtils.randomVersion(random()), IndexVersions.ZERO, IndexVersionUtils.randomVersion()).build();
    Settings settings = randomBoolean() ? null : Settings.builder().put("test", "setting").build();
    OsInfo osInfo = null;
    if (randomBoolean()) {
        int availableProcessors = randomIntBetween(1, 64);
        // Allocated processors never exceed the available count.
        Processors allocatedProcessors = Processors.of((double) randomIntBetween(1, availableProcessors));
        long refreshInterval = randomBoolean() ? -1 : randomNonNegativeLong();
        String name = randomAlphaOfLengthBetween(3, 10);
        String arch = randomAlphaOfLengthBetween(3, 10);
        String version = randomAlphaOfLengthBetween(3, 10);
        // NOTE(review): 'name' is passed for two consecutive parameters — presumably name and
        // pretty-name; confirm against the OsInfo constructor.
        osInfo = new OsInfo(refreshInterval, availableProcessors, allocatedProcessors, name, name, arch, version);
    }
    ProcessInfo process = randomBoolean() ? null : new ProcessInfo(randomInt(), randomBoolean(), randomNonNegativeLong());
    JvmInfo jvm = randomBoolean() ? null : JvmInfo.jvmInfo();
    ThreadPoolInfo threadPoolInfo = null;
    if (randomBoolean()) {
        int numThreadPools = randomIntBetween(1, 10);
        List<ThreadPool.Info> threadPoolInfos = new ArrayList<>(numThreadPools);
        for (int i = 0; i < numThreadPools; i++) {
            threadPoolInfos.add(new ThreadPool.Info(randomAlphaOfLengthBetween(3, 10), randomFrom(ThreadPool.ThreadPoolType.values()), randomInt()));
        }
        threadPoolInfo = new ThreadPoolInfo(threadPoolInfos);
    }
    // One shared fake bound address reused for transport, HTTP and remote-cluster-server info.
    Map<String, BoundTransportAddress> profileAddresses = new HashMap<>();
    BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[] { buildNewFakeTransportAddress() }, buildNewFakeTransportAddress());
    profileAddresses.put("test_address", dummyBoundTransportAddress);
    TransportInfo transport = randomBoolean() ? null : new TransportInfo(dummyBoundTransportAddress, profileAddresses);
    HttpInfo httpInfo = randomBoolean() ? null : new HttpInfo(dummyBoundTransportAddress, randomNonNegativeLong());
    RemoteClusterServerInfo remoteClusterServerInfo = randomBoolean() ? null : new RemoteClusterServerInfo(dummyBoundTransportAddress);
    PluginsAndModules pluginsAndModules = null;
    if (randomBoolean()) {
        // 0-5 random plugin descriptors; stable plugins carry no Java version or module name.
        int numPlugins = randomIntBetween(0, 5);
        List<PluginDescriptor> plugins = new ArrayList<>();
        for (int i = 0; i < numPlugins; i++) {
            var isStable = randomBoolean();
            var hasModuleName = randomBoolean();
            plugins.add(new PluginDescriptor(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(6, 32), "1.8", isStable ? null : randomAlphaOfLengthBetween(3, 10), isStable || hasModuleName == false ? null : randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), randomBoolean(), randomBoolean(), randomBoolean(), isStable));
        }
        // 0-5 random module descriptors built the same way.
        int numModules = randomIntBetween(0, 5);
        List<PluginDescriptor> modules = new ArrayList<>();
        for (int i = 0; i < numModules; i++) {
            var isStable = randomBoolean();
            var hasModuleName = randomBoolean();
            modules.add(new PluginDescriptor(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(6, 32), "1.8", isStable ? null : randomAlphaOfLengthBetween(3, 10), isStable || hasModuleName == false ? null : randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), randomBoolean(), randomBoolean(), randomBoolean(), isStable));
        }
        pluginsAndModules = new PluginsAndModules(plugins.stream().map(PluginRuntimeInfo::new).toList(), modules);
    }
    IngestInfo ingestInfo = null;
    if (randomBoolean()) {
        int numProcessors = randomIntBetween(0, 5);
        List<ProcessorInfo> processors = new ArrayList<>(numProcessors);
        for (int i = 0; i < numProcessors; i++) {
            processors.add(new ProcessorInfo(randomAlphaOfLengthBetween(3, 10)));
        }
        ingestInfo = new IngestInfo(processors);
    }
    AggregationInfo aggregationInfo = null;
    if (randomBoolean()) {
        AggregationUsageService.Builder builder = new AggregationUsageService.Builder();
        int numOfAggs = randomIntBetween(0, 10);
        for (int i = 0; i < numOfAggs; i++) {
            String aggName = randomAlphaOfLength(10);
            try {
                if (randomBoolean()) {
                    builder.registerAggregationUsage(aggName);
                } else {
                    int numOfTypes = randomIntBetween(1, 10);
                    for (int j = 0; j < numOfTypes; j++) {
                        builder.registerAggregationUsage(aggName, randomAlphaOfLength(10));
                    }
                }
            } catch (IllegalArgumentException ex) {
                // Deliberately ignored: random agg names/types can collide and registering a
                // duplicate presumably throws — best-effort population is fine for this fixture.
            }
        }
        aggregationInfo = builder.build().info();
    }
    ByteSizeValue indexingBuffer = null;
    if (randomBoolean()) {
        // Random size in [0, 2^40) bytes, i.e. under 1 TB.
        indexingBuffer = ByteSizeValue.ofBytes(random().nextLong() & ((1L << 40) - 1));
    }
    return new NodeInfo(randomAlphaOfLengthBetween(6, 32), TransportVersionUtils.randomVersion(random()), IndexVersionUtils.randomVersion(random()), componentVersions, build, node, settings, osInfo, process, jvm, threadPoolInfo, transport, httpInfo, remoteClusterServerInfo, pluginsAndModules, ingestInfo, aggregationInfo, indexingBuffer);
}
234812.671219elasticsearch
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
    // Registry of every named-writeable used by the ML inference subsystem.
    // Registration order mirrors the category groups below; callers receive a
    // mutable list they may extend further.
    List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();

    // Feature pre-processors applied to inputs before model evaluation.
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(PreProcessor.class, OneHotEncoding.NAME.getPreferredName(), OneHotEncoding::new),
            new NamedWriteableRegistry.Entry(PreProcessor.class, TargetMeanEncoding.NAME.getPreferredName(), TargetMeanEncoding::new),
            new NamedWriteableRegistry.Entry(PreProcessor.class, FrequencyEncoding.NAME.getPreferredName(), FrequencyEncoding::new),
            new NamedWriteableRegistry.Entry(PreProcessor.class, CustomWordEmbedding.NAME.getPreferredName(), CustomWordEmbedding::new),
            new NamedWriteableRegistry.Entry(PreProcessor.class, NGram.NAME.getPreferredName(), NGram::new),
            new NamedWriteableRegistry.Entry(PreProcessor.class, Multi.NAME.getPreferredName(), Multi::new)));

    // Trained model implementations.
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(TrainedModel.class, Tree.NAME.getPreferredName(), Tree::new),
            new NamedWriteableRegistry.Entry(TrainedModel.class, Ensemble.NAME.getPreferredName(), Ensemble::new),
            new NamedWriteableRegistry.Entry(TrainedModel.class, LangIdentNeuralNetwork.NAME.getPreferredName(), LangIdentNeuralNetwork::new)));

    // Ensemble output aggregators.
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(OutputAggregator.class, WeightedSum.NAME.getPreferredName(), WeightedSum::new),
            new NamedWriteableRegistry.Entry(OutputAggregator.class, WeightedMode.NAME.getPreferredName(), WeightedMode::new),
            new NamedWriteableRegistry.Entry(OutputAggregator.class, LogisticRegression.NAME.getPreferredName(), LogisticRegression::new),
            new NamedWriteableRegistry.Entry(OutputAggregator.class, Exponent.NAME.getPreferredName(), Exponent::new)));

    // Inference result types (classic ML plus NLP/PyTorch results).
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(InferenceResults.class, ClassificationInferenceResults.NAME, ClassificationInferenceResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, RegressionInferenceResults.NAME, RegressionInferenceResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, WarningInferenceResults.NAME, WarningInferenceResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, ErrorInferenceResults.NAME, ErrorInferenceResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, NerResults.NAME, NerResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, FillMaskResults.NAME, FillMaskResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, PyTorchPassThroughResults.NAME, PyTorchPassThroughResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, TextExpansionResults.NAME, TextExpansionResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, TextEmbeddingResults.NAME, TextEmbeddingResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, NlpClassificationInferenceResults.NAME, NlpClassificationInferenceResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, QuestionAnsweringInferenceResults.NAME, QuestionAnsweringInferenceResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, TextSimilarityInferenceResults.NAME, TextSimilarityInferenceResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, ChunkedTextEmbeddingResults.NAME, ChunkedTextEmbeddingResults::new),
            new NamedWriteableRegistry.Entry(InferenceResults.class, ChunkedTextExpansionResults.NAME, ChunkedTextExpansionResults::new)));

    // Inference configurations.
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(InferenceConfig.class, ClassificationConfig.NAME.getPreferredName(), ClassificationConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, RegressionConfig.NAME.getPreferredName(), RegressionConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, NerConfig.NAME, NerConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, FillMaskConfig.NAME, FillMaskConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, TextExpansionConfig.NAME, TextExpansionConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, TextClassificationConfig.NAME, TextClassificationConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, PassThroughConfig.NAME, PassThroughConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, TextEmbeddingConfig.NAME, TextEmbeddingConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, ZeroShotClassificationConfig.NAME, ZeroShotClassificationConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, QuestionAnsweringConfig.NAME, QuestionAnsweringConfig::new),
            new NamedWriteableRegistry.Entry(InferenceConfig.class, TextSimilarityConfig.NAME, TextSimilarityConfig::new)));

    // Partial updates to inference configurations.
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, ClassificationConfigUpdate.NAME.getPreferredName(), ClassificationConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, EmptyConfigUpdate.NAME, EmptyConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, FillMaskConfigUpdate.NAME, FillMaskConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, NerConfigUpdate.NAME, NerConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, PassThroughConfigUpdate.NAME, PassThroughConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, RegressionConfigUpdate.NAME.getPreferredName(), RegressionConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, ResultsFieldUpdate.NAME, ResultsFieldUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, TextExpansionConfigUpdate.NAME, TextExpansionConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, TextClassificationConfigUpdate.NAME, TextClassificationConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, TextEmbeddingConfigUpdate.NAME, TextEmbeddingConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, ZeroShotClassificationConfigUpdate.NAME, ZeroShotClassificationConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, QuestionAnsweringConfigUpdate.NAME, QuestionAnsweringConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, TextSimilarityConfigUpdate.NAME, TextSimilarityConfigUpdate::new),
            new NamedWriteableRegistry.Entry(InferenceConfigUpdate.class, TokenizationConfigUpdate.NAME, TokenizationConfigUpdate::new)));

    // Trained model storage location.
    entries.add(new NamedWriteableRegistry.Entry(TrainedModelLocation.class, IndexLocation.INDEX.getPreferredName(), IndexLocation::new));

    // NLP tokenization settings and their updates.
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(Tokenization.class, BertJapaneseTokenization.NAME.getPreferredName(), BertJapaneseTokenization::new),
            new NamedWriteableRegistry.Entry(Tokenization.class, BertTokenization.NAME.getPreferredName(), BertTokenization::new),
            new NamedWriteableRegistry.Entry(Tokenization.class, MPNetTokenization.NAME.getPreferredName(), MPNetTokenization::new),
            new NamedWriteableRegistry.Entry(Tokenization.class, RobertaTokenization.NAME, RobertaTokenization::new),
            new NamedWriteableRegistry.Entry(Tokenization.class, XLMRobertaTokenization.NAME, XLMRobertaTokenization::new)));
    entries.addAll(
        List.of(
            new NamedWriteableRegistry.Entry(TokenizationUpdate.class, BertJapaneseTokenizationUpdate.NAME.getPreferredName(), BertJapaneseTokenizationUpdate::new),
            new NamedWriteableRegistry.Entry(TokenizationUpdate.class, BertTokenizationUpdate.NAME.getPreferredName(), BertTokenizationUpdate::new),
            new NamedWriteableRegistry.Entry(TokenizationUpdate.class, MPNetTokenizationUpdate.NAME.getPreferredName(), MPNetTokenizationUpdate::new),
            new NamedWriteableRegistry.Entry(TokenizationUpdate.class, RobertaTokenizationUpdate.NAME.getPreferredName(), RobertaTokenizationUpdate::new),
            new NamedWriteableRegistry.Entry(TokenizationUpdate.class, XLMRobertaTokenizationUpdate.NAME.getPreferredName(), XLMRobertaTokenizationUpdate::new)));

    return entries;
}
235849.8913160elasticsearch
/**
 * Exercises the delete-expired-data machinery end to end. Creates five jobs with
 * different results/snapshot retention settings, runs them twice over the data index,
 * creates forecasts with short / default / infinite expiry, then calls
 * {@code deleteExpiredData} and asserts that exactly the expected buckets, records,
 * model snapshots, forecasts and orphaned state documents were removed.
 *
 * @param customThrottle requests-per-second throttle forwarded to {@code deleteExpiredData};
 *                       may be {@code null} (presumably meaning "use the default" — confirm
 *                       against deleteExpiredData's implementation)
 * @param numUnusedState number of orphaned state documents (belonging to no job) to index
 *                       into the ML state index; all must be gone after expired-data deletion
 */
private void testExpiredDeletion(Float customThrottle, int numUnusedState) throws Exception {
    String mlStateIndexName = AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX + "-000001";
    // Index state docs for a job that does not exist; the cleanup must treat them as unused.
    BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
    for (int i = 0; i < numUnusedState; i++) {
        String docId = "non_existing_job_" + randomFrom("model_state_1234567#" + i, "quantiles", "categorizer_state#" + i);
        IndexRequest indexRequest = new IndexRequest(mlStateIndexName).id(docId).source(Collections.emptyMap());
        bulkRequestBuilder.add(indexRequest);
    }
    // Fire the bulk asynchronously; success is asserted later, just before deletion runs.
    ActionFuture<BulkResponse> indexUnusedStateDocsResponse = bulkRequestBuilder.execute();
    // One job per retention combination: results-only, snapshots-only, both, neither,
    // and snapshots-retention with an explicit "retain" flag set later on.
    List<Job.Builder> jobs = new ArrayList<>();
    jobs.add(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(1000L).setDailyModelSnapshotRetentionAfterDays(1000L));
    jobs.add(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(1000L).setDailyModelSnapshotRetentionAfterDays(1000L));
    jobs.add(newJobBuilder("snapshots-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L).setDailyModelSnapshotRetentionAfterDays(2L));
    jobs.add(newJobBuilder("snapshots-retention-with-retain").setResultsRetentionDays(null).setModelSnapshotRetentionDays(2L).setDailyModelSnapshotRetentionAfterDays(2L));
    jobs.add(newJobBuilder("results-and-snapshots-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(2L).setDailyModelSnapshotRetentionAfterDays(2L));
    List<String> shortExpiryForecastIds = new ArrayList<>();
    long now = System.currentTimeMillis();
    // NOTE(review): despite the name, this is 48 hours (plus 1 ms) in the past, which
    // puts the re-dated snapshot beyond every 1- and 2-day retention window used above.
    long oneDayAgo = now - TimeValue.timeValueHours(48).getMillis() - 1;
    // First pass: run each job's datafeed up to 24h before "now".
    for (Job.Builder job : jobs) {
        putJob(job);
        String datafeedId = job.getId() + "-feed";
        DatafeedConfig.Builder datafeedConfig = new DatafeedConfig.Builder(datafeedId, job.getId());
        datafeedConfig.setIndices(Collections.singletonList(DATA_INDEX));
        DatafeedConfig datafeed = datafeedConfig.build();
        putDatafeed(datafeed);
        openJob(job.getId());
        startDatafeed(datafeedId, 0, now - TimeValue.timeValueHours(24).getMillis());
    }
    for (Job.Builder job : jobs) {
        waitUntilJobIsClosed(job.getId());
    }
    for (Job.Builder job : jobs) {
        // Sanity-check first-pass output before anything is aged or deleted.
        assertThat(getBuckets(job.getId()).size(), is(greaterThanOrEqualTo(47)));
        assertThat(getRecords(job.getId()).size(), equalTo(2));
        List<ModelSnapshot> modelSnapshots = getModelSnapshots(job.getId());
        assertThat(modelSnapshots.size(), equalTo(1));
        String snapshotDocId = ModelSnapshot.documentId(modelSnapshots.get(0));
        // Back-date the first snapshot so it falls outside the snapshot retention windows.
        String snapshotUpdate = "{ \"timestamp\": " + oneDayAgo + "}";
        UpdateRequest updateSnapshotRequest = new UpdateRequest(".ml-anomalies-" + job.getId(), snapshotDocId);
        updateSnapshotRequest.doc(snapshotUpdate.getBytes(StandardCharsets.UTF_8), XContentType.JSON);
        client().execute(TransportUpdateAction.TYPE, updateSnapshotRequest).get();
        // Create three forecasts per job: one expiring almost immediately, one with the
        // default expiry, and one that never expires (TimeValue.ZERO).
        openJob(job.getId());
        String forecastShortExpiryId = forecast(job.getId(), TimeValue.timeValueHours(1), TimeValue.timeValueSeconds(1));
        shortExpiryForecastIds.add(forecastShortExpiryId);
        String forecastDefaultExpiryId = forecast(job.getId(), TimeValue.timeValueHours(1), null);
        String forecastNoExpiryId = forecast(job.getId(), TimeValue.timeValueHours(1), TimeValue.ZERO);
        waitForecastToFinish(job.getId(), forecastShortExpiryId);
        waitForecastToFinish(job.getId(), forecastDefaultExpiryId);
        waitForecastToFinish(job.getId(), forecastNoExpiryId);
    }
    refresh("*");
    // Wait until the epoch second ticks over so the 1-second forecast expiry has elapsed.
    long before = System.currentTimeMillis() / 1000;
    assertBusy(() -> assertNotEquals(before, System.currentTimeMillis() / 1000), 1, TimeUnit.SECONDS);
    // Second pass: run each datafeed to "now", producing a second model snapshot per job.
    for (Job.Builder job : jobs) {
        startDatafeed(job.getId() + "-feed", 0, now);
        waitUntilJobIsClosed(job.getId());
        assertThat(getBuckets(job.getId()).size(), is(greaterThanOrEqualTo(70)));
        assertThat(getRecords(job.getId()).size(), equalTo(2));
        List<ModelSnapshot> modelSnapshots = getModelSnapshots(job.getId());
        assertThat(modelSnapshots.size(), equalTo(2));
    }
    // Mark every snapshot of this job as retained; they must survive snapshot pruning.
    retainAllSnapshots("snapshots-retention-with-retain");
    // Baselines used to prove deletion does not touch model size stats or notifications.
    long totalModelSizeStatsBeforeDelete = SearchResponseUtils.getTotalHitsValue(prepareSearch("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN).setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")));
    long totalNotificationsCountBeforeDelete = SearchResponseUtils.getTotalHitsValue(prepareSearch(NotificationsIndex.NOTIFICATIONS_INDEX));
    assertThat(totalModelSizeStatsBeforeDelete, greaterThan(0L));
    assertThat(totalNotificationsCountBeforeDelete, greaterThan(0L));
    // All three forecasts per job should still exist before deletion.
    List<ForecastRequestStats> forecastStats = getForecastStats();
    assertThat(forecastStats.size(), equalTo(jobs.size() * 3));
    for (ForecastRequestStats forecastStat : forecastStats) {
        assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount()));
    }
    // The orphaned-state bulk from the top of the test must have completed cleanly.
    assertFalse(indexUnusedStateDocsResponse.get().hasFailures());
    // Trigger the deletion under test.
    assertThat(deleteExpiredData(customThrottle).isDeleted(), is(true));
    // No retention configured: nothing is pruned.
    assertThat(getBuckets("no-retention").size(), is(greaterThanOrEqualTo(70)));
    assertThat(getRecords("no-retention").size(), equalTo(2));
    assertThat(getModelSnapshots("no-retention").size(), equalTo(2));
    // Results retention: old buckets/records pruned, snapshots untouched.
    List<Bucket> buckets = getBuckets("results-retention");
    assertThat(buckets.size(), is(lessThanOrEqualTo(25)));
    assertThat(buckets.size(), is(greaterThanOrEqualTo(22)));
    assertThat(buckets.get(0).getTimestamp().getTime(), greaterThanOrEqualTo(oneDayAgo));
    assertThat(getRecords("results-retention").size(), equalTo(0));
    assertThat(getModelSnapshots("results-retention").size(), equalTo(2));
    // Snapshot retention: the back-dated snapshot is pruned, results untouched.
    assertThat(getBuckets("snapshots-retention").size(), is(greaterThanOrEqualTo(70)));
    assertThat(getRecords("snapshots-retention").size(), equalTo(2));
    assertThat(getModelSnapshots("snapshots-retention").size(), equalTo(1));
    // Snapshot retention but snapshots marked "retain": both snapshots survive.
    assertThat(getBuckets("snapshots-retention-with-retain").size(), is(greaterThanOrEqualTo(70)));
    assertThat(getRecords("snapshots-retention-with-retain").size(), equalTo(2));
    assertThat(getModelSnapshots("snapshots-retention-with-retain").size(), equalTo(2));
    // Both retentions: results pruned AND old snapshot pruned.
    buckets = getBuckets("results-and-snapshots-retention");
    assertThat(buckets.size(), is(lessThanOrEqualTo(25)));
    assertThat(buckets.size(), is(greaterThanOrEqualTo(22)));
    assertThat(buckets.get(0).getTimestamp().getTime(), greaterThanOrEqualTo(oneDayAgo));
    assertThat(getRecords("results-and-snapshots-retention").size(), equalTo(0));
    assertThat(getModelSnapshots("results-and-snapshots-retention").size(), equalTo(1));
    // Model size stats are never deleted; notifications can only have grown.
    long totalModelSizeStatsAfterDelete = SearchResponseUtils.getTotalHitsValue(prepareSearch("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN).setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")));
    long totalNotificationsCountAfterDelete = SearchResponseUtils.getTotalHitsValue(prepareSearch(NotificationsIndex.NOTIFICATIONS_INDEX));
    assertThat(totalModelSizeStatsAfterDelete, equalTo(totalModelSizeStatsBeforeDelete));
    assertThat(totalNotificationsCountAfterDelete, greaterThanOrEqualTo(totalNotificationsCountBeforeDelete));
    // Only the short-expiry forecast of each job should have been removed (3 -> 2 per job).
    forecastStats = getForecastStats();
    assertThat(forecastStats.size(), equalTo(jobs.size() * 2));
    for (ForecastRequestStats forecastStat : forecastStats) {
        assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount()));
    }
    for (Job.Builder job : jobs) {
        for (String forecastId : shortExpiryForecastIds) {
            assertThat(countForecastDocs(job.getId(), forecastId), equalTo(0L));
        }
    }
    // All orphaned "non_existing_job" state documents must have been cleaned up.
    assertResponse(prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()).setFetchSource(false).setTrackTotalHits(true).setSize(10000), stateDocsResponse -> {
        assertThat(stateDocsResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(5L));
        int nonExistingJobDocsCount = 0;
        List<String> nonExistingJobExampleIds = new ArrayList<>();
        for (SearchHit hit : stateDocsResponse.getHits().getHits()) {
            if (hit.getId().startsWith("non_existing_job")) {
                nonExistingJobDocsCount++;
                if (nonExistingJobExampleIds.size() < 10) {
                    nonExistingJobExampleIds.add(hit.getId());
                }
            }
        }
        assertThat("Documents for non_existing_job are still around; examples: " + nonExistingJobExampleIds, nonExistingJobDocsCount, equalTo(0));
    });
}
233265.5821172elasticsearch
/**
 * Makes the ML memory autoscaling decision for the current cluster state.
 * Checks, in order: scale-up from zero, stale memory statistics, per-node load
 * gathering, scale-up for queued jobs/models, and finally a cooled-down scale-down.
 * If none of those produce a decision the currently perceived capacity is returned.
 *
 * @param configuration            decider settings (queue limits, down-scale delay)
 * @param context                  autoscaling context supplying cluster state and current capacity
 * @param mlContext                snapshot of ML nodes, tasks and model assignments
 * @param allocatedProcessorsScale scale factor applied when counting allocated processors
 * @return the requested memory capacity together with a human-readable reason
 */
public MlMemoryAutoscalingCapacity scale(Settings configuration, AutoscalingDeciderContext context, MlAutoscalingContext mlContext, int allocatedProcessorsScale) {
    final ClusterState clusterState = context.state();
    // Align the memory tracker's refresh cadence with the observed scale-call interval.
    scaleTimer.lastScaleToScaleIntervalMillis().ifPresent(scaleInterval -> mlMemoryTracker.setAutoscalingCheckInterval(Duration.ofMillis(scaleInterval)));
    final int numAnalyticsJobsInQueue = MlAutoscalingDeciderService.NUM_ANALYTICS_JOBS_IN_QUEUE.get(configuration);
    final int numAnomalyJobsInQueue = MlAutoscalingDeciderService.NUM_ANOMALY_JOBS_IN_QUEUE.get(configuration);
    final NativeMemoryCapacity currentScale = currentScale(mlContext.mlNodes);
    // No ML nodes yet but work is waiting: special-cased scale-up from zero.
    if (mlContext.mlNodes.isEmpty() && mlContext.hasWaitingTasks()) {
        return scaleUpFromZero(mlContext);
    }
    // Stale job-memory view: refresh and return a no-change decision rather than guess.
    if (mlMemoryTracker.isRecentlyRefreshed() == false) {
        logger.debug("view of job memory is stale given duration [{}]. Not attempting to make scaling decision", mlMemoryTracker.getStalenessDuration());
        return refreshMemoryTrackerAndBuildEmptyDecision(MEMORY_STALE);
    }
    // Gather per-node load; bail out on any error or inaccurate memory accounting.
    List<NodeLoad> nodeLoads = new ArrayList<>(mlContext.mlNodes.size());
    boolean nodeLoadIsMemoryAccurate = true;
    for (DiscoveryNode node : mlContext.mlNodes) {
        NodeLoad nodeLoad = nodeLoadDetector.detectNodeLoad(clusterState, node, maxOpenJobs, maxMachineMemoryPercent, useAuto);
        if (nodeLoad.getError() != null) {
            logger.warn("[{}] failed to gather node load limits, failure [{}]. Returning no scale", node.getId(), nodeLoad.getError());
            return refreshMemoryTrackerAndBuildEmptyDecision("Passing currently perceived capacity as there was a failure gathering node limits [" + nodeLoad.getError() + "]");
        }
        nodeLoads.add(nodeLoad);
        if (nodeLoad.isUseMemory() == false) {
            nodeLoadIsMemoryAccurate = false;
            logger.debug("[{}] failed to gather node load - memory usage for one or more tasks not available.", node.getId());
        }
    }
    if (nodeLoadIsMemoryAccurate == false) {
        return refreshMemoryTrackerAndBuildEmptyDecision("Passing currently perceived capacity as nodes were unable to provide an accurate view of their memory usage");
    }
    // Scale up takes priority; a scale-up also resets the scale-down cool-down timer.
    final Optional<MlMemoryAutoscalingCapacity> scaleUpDecision = checkForScaleUp(numAnomalyJobsInQueue, numAnalyticsJobsInQueue, nodeLoads, mlContext.waitingAnomalyJobs, mlContext.waitingSnapshotUpgrades, mlContext.waitingAnalyticsJobs, mlContext.waitingAllocatedModels, calculateFutureAvailableCapacity(mlContext.persistentTasks, nodeLoads).orElse(null), currentScale);
    if (scaleUpDecision.isPresent()) {
        scaleTimer.resetScaleDownCoolDown();
        return scaleUpDecision.get();
    }
    // Work is still queued (within configured limits) or models are partially allocated:
    // hold the current capacity and keep the scale-down cool-down reset.
    final List<String> partiallyAllocatedModels = mlContext.findPartiallyAllocatedModels();
    if (mlContext.waitingAnalyticsJobs.isEmpty() == false || mlContext.waitingSnapshotUpgrades.isEmpty() == false || mlContext.waitingAnomalyJobs.isEmpty() == false || partiallyAllocatedModels.isEmpty() == false) {
        scaleTimer.resetScaleDownCoolDown();
        return MlMemoryAutoscalingCapacity.from(context.currentCapacity()).setReason(String.format(Locale.ROOT, "Passing currently perceived capacity as there are [%d] model snapshot upgrades, " + "[%d] analytics and [%d] anomaly detection jobs in the queue, " + "[%d] trained models not fully-allocated, " + "but the number in the queue is less than the configured maximum allowed " + "or the queued jobs will eventually be assignable at the current size.", mlContext.waitingSnapshotUpgrades.size(), mlContext.waitingAnalyticsJobs.size(), mlContext.waitingAnomalyJobs.size(), partiallyAllocatedModels.size())).build();
    }
    // A zero max-task memory here means estimates are missing/inconsistent; do not scale.
    long maxTaskMemoryBytes = maxMemoryBytes(mlContext);
    if (maxTaskMemoryBytes == 0L) {
        assert mlContext.isEmpty() == false : "No tasks or models at all should have put us in the scale down to zero branch";
        logger.warn("The calculated minimum required node size was unexpectedly [0] as there are [{}] anomaly job tasks, " + "[{}] model snapshot upgrade tasks, [{}] data frame analytics tasks and [{}] model assignments", mlContext.anomalyDetectionTasks.size(), mlContext.snapshotUpgradeTasks.size(), mlContext.dataframeAnalyticsTasks.size(), mlContext.modelAssignments.size());
        logger.debug(() -> format("persistent tasks that caused unexpected scaling situation: [%s]", (mlContext.persistentTasks == null) ? "null" : Strings.toString(mlContext.persistentTasks)));
        return refreshMemoryTrackerAndBuildEmptyDecision("Passing currently perceived capacity as there are running analytics and anomaly jobs or deployed models, " + "but their assignment explanations are unexpected or their memory usage estimates are inaccurate.");
    }
    // Candidate scale-down, vetoed when it would exceed capacity or starve CPU-heavy
    // model assignments (more than half the ML tier's processors in use).
    final Optional<MlMemoryAutoscalingCapacity> maybeScaleDown = checkForScaleDown(nodeLoads, maxTaskMemoryBytes, currentScale).map(result -> {
        MlMemoryAutoscalingCapacity capacity = ensureScaleDown(result, MlMemoryAutoscalingCapacity.from(context.currentCapacity()).build());
        if (capacity == null) {
            return null;
        }
        if (modelAssignmentsRequireMoreThanHalfCpu(mlContext.modelAssignments.values(), mlContext.mlNodes, allocatedProcessorsScale)) {
            logger.debug("not down-scaling; model assignments require more than half of the ML tier's allocated processors");
            return null;
        }
        return capacity;
    });
    if (maybeScaleDown.isPresent()) {
        final MlMemoryAutoscalingCapacity scaleDownDecisionResult = maybeScaleDown.get();
        // Guard: collapsing nodes could push per-node job counts over max_open_jobs.
        if (nodeLoads.size() > 1) {
            long totalAssignedJobs = nodeLoads.stream().mapToLong(NodeLoad::getNumAssignedJobsAndModels).sum();
            long maxOpenJobsCopy = this.maxOpenJobs;
            if (totalAssignedJobs > maxOpenJobsCopy) {
                String msg = String.format(Locale.ROOT, "not scaling down as the total number of jobs [%d] exceeds the setting [%s (%d)]. " + "To allow a scale down [%s] must be increased.", totalAssignedJobs, MAX_OPEN_JOBS_PER_NODE.getKey(), maxOpenJobsCopy, MAX_OPEN_JOBS_PER_NODE.getKey());
                logger.info(() -> format("%s Calculated potential scaled down capacity [%s]", msg, scaleDownDecisionResult));
                return MlMemoryAutoscalingCapacity.from(context.currentCapacity()).setReason(msg).build();
            }
        }
        // Only act on the scale-down once the configured delay has fully elapsed.
        long msLeftToScale = scaleTimer.markDownScaleAndGetMillisLeftFromDelay(configuration);
        if (msLeftToScale <= 0) {
            return scaleDownDecisionResult;
        }
        TimeValue downScaleDelay = MlAutoscalingDeciderService.DOWN_SCALE_DELAY.get(configuration);
        logger.debug(() -> format("not scaling down as the current scale down delay [%s] is not satisfied." + " The last time scale down was detected [%s]. Calculated scaled down capacity [%s] ", downScaleDelay.getStringRep(), DEFAULT_FORMATTER.format(ofEpochMilli(scaleTimer.downScaleDetectedMillis())), scaleDownDecisionResult));
        return MlMemoryAutoscalingCapacity.from(context.currentCapacity()).setReason(String.format(Locale.ROOT, "Passing currently perceived capacity as down scale delay has not been satisfied; configured delay [%s] " + "last detected scale down event [%s]. Will request scale down in approximately [%s]", downScaleDelay.getStringRep(), XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(scaleTimer.downScaleDetectedMillis())), TimeValue.timeValueMillis(msLeftToScale).getStringRep())).build();
    }
    // Neither a scale-up nor a scale-down is warranted: keep the current capacity.
    return MlMemoryAutoscalingCapacity.from(context.currentCapacity()).setReason("Passing currently perceived capacity as no scaling changes are necessary").build();
}
233252.4624171elasticsearch
 /**
  * Determines whether a memory scale-up is required to run queued work.
  * Two paths can request capacity: (1) the queues exceed their configured limits (or any
  * trained model is waiting), in which case the unassignable work's memory is merged into
  * the current scale; (2) queues are within limits but no node will ever be large enough,
  * or capacity freed by finishing jobs will not cover the queued requirements.
  *
  * @param numAnomalyJobsInQueue   configured number of anomaly jobs allowed to queue
  * @param numAnalyticsJobsInQueue configured number of analytics jobs allowed to queue
  * @param nodeLoads               current per-node load (must be error-free and memory-accurate)
  * @param waitingAnomalyJobs      anomaly job ids awaiting assignment
  * @param waitingSnapshotUpgrades snapshot-upgrade task ids awaiting assignment (counted
  *                                against the anomaly queue and memory requirements)
  * @param waitingAnalyticsJobs    data frame analytics job ids awaiting assignment
  * @param waitingAllocatedModels  trained model ids awaiting allocation (never allowed to queue)
  * @param futureFreedCapacity     capacity expected to free up when running jobs finish,
  *                                or {@code null} if unknown
  * @param currentScale            the currently provisioned native memory capacity
  * @return the required capacity, or empty when no scale-up is needed (or cannot be decided)
  */
 Optional<MlMemoryAutoscalingCapacity> checkForScaleUp(int numAnomalyJobsInQueue, int numAnalyticsJobsInQueue, List<NodeLoad> nodeLoads, List<String> waitingAnomalyJobs, List<String> waitingSnapshotUpgrades, List<String> waitingAnalyticsJobs, List<String> waitingAllocatedModels, @Nullable NativeMemoryCapacity futureFreedCapacity, NativeMemoryCapacity currentScale) {
    logger.debug(() -> format("Checking for scale up -" + " waiting data frame analytics jobs [%s]" + " data frame analytics jobs allowed to queue [%s]" + " waiting anomaly detection jobs (including model snapshot upgrades) [%s]" + " anomaly detection jobs allowed to queue [%s]" + " waiting models [%s]" + " future freed capacity [%s]" + " current scale [%s]", waitingAnalyticsJobs.size(), numAnalyticsJobsInQueue, waitingAnomalyJobs.size() + waitingSnapshotUpgrades.size(), numAnomalyJobsInQueue, waitingAllocatedModels.size(), futureFreedCapacity, currentScale));
    // Path 1: queues exceed configured limits, or any trained model is waiting
    // (models have a queue allowance of 0 below).
    if (waitingAnalyticsJobs.size() > numAnalyticsJobsInQueue || waitingAnomalyJobs.size() + waitingSnapshotUpgrades.size() > numAnomalyJobsInQueue || waitingAllocatedModels.size() > 0) {
        // Simulate assignments in sequence — anomaly (plus snapshot upgrades), then
        // analytics, then models — threading the updated node loads through each step.
        // Each tuple is (extra capacity needed, node loads after simulated assignment).
        Tuple<NativeMemoryCapacity, List<NodeLoad>> anomalyCapacityAndNewLoad = determineUnassignableJobs(Stream.concat(waitingAnomalyJobs.stream(), waitingSnapshotUpgrades.stream()).toList(), this::getAnomalyMemoryRequirement, NodeLoad.Builder::incNumAssignedAnomalyDetectorJobs, numAnomalyJobsInQueue, nodeLoads).orElse(Tuple.tuple(NativeMemoryCapacity.ZERO, nodeLoads));
        Tuple<NativeMemoryCapacity, List<NodeLoad>> analyticsCapacityAndNewLoad = determineUnassignableJobs(waitingAnalyticsJobs, this::getAnalyticsMemoryRequirement, NodeLoad.Builder::incNumAssignedDataFrameAnalyticsJobs, numAnalyticsJobsInQueue, anomalyCapacityAndNewLoad.v2()).orElse(Tuple.tuple(NativeMemoryCapacity.ZERO, anomalyCapacityAndNewLoad.v2()));
        Tuple<NativeMemoryCapacity, List<NodeLoad>> modelCapacityAndNewLoad = determineUnassignableJobs(waitingAllocatedModels, this::getAllocatedModelRequirement, NodeLoad.Builder::incNumAssignedNativeInferenceModels, 0, analyticsCapacityAndNewLoad.v2()).orElse(Tuple.tuple(NativeMemoryCapacity.ZERO, analyticsCapacityAndNewLoad.v2()));
        // Everything fits on the simulated current nodes: no extra capacity needed.
        if (analyticsCapacityAndNewLoad.v1().equals(NativeMemoryCapacity.ZERO) && anomalyCapacityAndNewLoad.v1().equals(NativeMemoryCapacity.ZERO) && modelCapacityAndNewLoad.v1().equals(NativeMemoryCapacity.ZERO)) {
            logger.debug("no_scale event as current capacity, even though there are waiting jobs, is adequate to run the queued jobs");
            return Optional.empty();
        }
        // Credit the largest remaining free node memory against the requested increase.
        long maxFreeNodeMemAfterPossibleAssignments = modelCapacityAndNewLoad.v2().stream().filter(nodeLoad -> nodeLoad.getError() == null && nodeLoad.isUseMemory()).map(NodeLoad::getFreeMemoryExcludingPerNodeOverhead).max(Long::compareTo).orElse(0L);
        // Free memory larger than the current scale indicates inconsistent accounting;
        // fail the assertion in tests, and in production ignore the credit entirely.
        if (maxFreeNodeMemAfterPossibleAssignments > currentScale.getNodeMlNativeMemoryRequirementExcludingOverhead() || maxFreeNodeMemAfterPossibleAssignments > currentScale.getTierMlNativeMemoryRequirementExcludingOverhead()) {
            assert false : "highest free node memory after possible assignments [" + maxFreeNodeMemAfterPossibleAssignments + "] greater than current scale [" + currentScale + "]";
            logger.warn("Highest free node memory after possible assignments [" + maxFreeNodeMemAfterPossibleAssignments + "] greater than current scale [" + currentScale + "] - will scale up without considering current free memory");
            maxFreeNodeMemAfterPossibleAssignments = 0;
        }
        // Negative first term subtracts the free-memory credit before merging requirements.
        NativeMemoryCapacity updatedCapacity = new NativeMemoryCapacity(-maxFreeNodeMemAfterPossibleAssignments, 0).merge(currentScale).merge(analyticsCapacityAndNewLoad.v1()).merge(anomalyCapacityAndNewLoad.v1()).merge(modelCapacityAndNewLoad.v1());
        MlMemoryAutoscalingCapacity requiredCapacity = updatedCapacity.autoscalingCapacity(maxMachineMemoryPercent, useAuto, mlNativeMemoryForLargestMlNode, nodeAvailabilityZoneMapper.getNumMlAvailabilityZones().orElse(1)).setReason("requesting scale up as number of jobs in queues exceeded configured limit " + "or there is at least one trained model waiting for assignment " + "and current capacity is not large enough for waiting jobs or models").build();
        return Optional.of(requiredCapacity);
    }
    // Path 2: queues are within limits but jobs are still waiting.
    if (waitingAnalyticsJobs.isEmpty() == false || waitingSnapshotUpgrades.isEmpty() == false || waitingAnomalyJobs.isEmpty() == false) {
        if (futureFreedCapacity == null) {
            // Unknown future capacity: only scale up if some queued job is too big for
            // any existing node; otherwise no decision can be made.
            Optional<Long> maxSize = Stream.concat(waitingAnalyticsJobs.stream().map(this::getAnalyticsMemoryRequirement), Stream.concat(waitingAnomalyJobs.stream().map(this::getAnomalyMemoryRequirement), waitingSnapshotUpgrades.stream().map(this::getAnomalyMemoryRequirement))).filter(Objects::nonNull).max(Long::compareTo);
            if (maxSize.isPresent() && maxSize.get() > currentScale.getNodeMlNativeMemoryRequirementExcludingOverhead()) {
                MlMemoryAutoscalingCapacity requiredCapacity = new NativeMemoryCapacity(Math.max(currentScale.getTierMlNativeMemoryRequirementExcludingOverhead(), maxSize.get()), maxSize.get()).autoscalingCapacity(maxMachineMemoryPercent, useAuto, mlNativeMemoryForLargestMlNode, nodeAvailabilityZoneMapper.getNumMlAvailabilityZones().orElse(1)).setReason("requesting scale up as there is no node large enough to handle queued jobs").build();
                return Optional.of(requiredCapacity);
            }
            logger.debug("Cannot make a scaling decision as future freed capacity is not known and largest job could fit on an existing node");
            return Optional.empty();
        }
        // Start in deficit by the capacity expected to free up, then add every queued
        // job's requirement; jobs with unknown requirements are skipped.
        long newTierNeeded = -futureFreedCapacity.getTierMlNativeMemoryRequirementExcludingOverhead();
        long newNodeMax = currentScale.getNodeMlNativeMemoryRequirementExcludingOverhead();
        for (String analyticsJob : waitingAnalyticsJobs) {
            Long requiredMemory = getAnalyticsMemoryRequirement(analyticsJob);
            if (requiredMemory == null) {
                continue;
            }
            newTierNeeded += requiredMemory;
            newNodeMax = Math.max(newNodeMax, requiredMemory);
        }
        for (String anomalyJob : waitingAnomalyJobs) {
            Long requiredMemory = getAnomalyMemoryRequirement(anomalyJob);
            if (requiredMemory == null) {
                continue;
            }
            newTierNeeded += requiredMemory;
            newNodeMax = Math.max(newNodeMax, requiredMemory);
        }
        for (String snapshotUpgrade : waitingSnapshotUpgrades) {
            // Snapshot upgrades use the anomaly job's memory requirement.
            Long requiredMemory = getAnomalyMemoryRequirement(snapshotUpgrade);
            if (requiredMemory == null) {
                continue;
            }
            newTierNeeded += requiredMemory;
            newNodeMax = Math.max(newNodeMax, requiredMemory);
        }
        // Scale up if a queued job needs a bigger node, or the tier still has a deficit
        // after accounting for the capacity that will free up.
        if (newNodeMax > currentScale.getNodeMlNativeMemoryRequirementExcludingOverhead() || newTierNeeded > 0L) {
            NativeMemoryCapacity newCapacity = new NativeMemoryCapacity(Math.max(0L, newTierNeeded), newNodeMax);
            MlMemoryAutoscalingCapacity requiredCapacity = currentScale.merge(newCapacity).autoscalingCapacity(maxMachineMemoryPercent, useAuto, mlNativeMemoryForLargestMlNode, nodeAvailabilityZoneMapper.getNumMlAvailabilityZones().orElse(1)).setReason("scaling up as adequate space would not automatically become available when running jobs finish").build();
            return Optional.of(requiredCapacity);
        }
    }
    return Optional.empty();
}
235549.83194elasticsearch
/**
 * Frequent-item-sets aggregation over three keyword fields plus a date field,
 * optionally excluding one date value via an {@code IncludeExclude} filter on the
 * date value source. Verifies the mined sets against a fixed expected list.
 */
public void testSingleValueWithDate() throws IOException {
    // Optionally exclude a single date value to exercise the include/exclude filter.
    String excludedDate = randomBoolean() ? randomFrom("2022-06-02", "2022-06-03", "1970-01-01") : null;
    IncludeExclude dateFilter = excludedDate == null
        ? null
        : new IncludeExclude(null, null, null, new TreeSet<>(Set.of(new BytesRef(excludedDate))));

    List<MultiValuesSourceFieldConfig> fieldConfigs = new ArrayList<>();
    fieldConfigs.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD1).build());
    fieldConfigs.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD2).build());
    fieldConfigs.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD3).build());
    fieldConfigs.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(DATE_FIELD).setIncludeExclude(dateFilter).build());

    double support = randomDoubleBetween(0.13, 0.51, true);
    int setSize = randomIntBetween(2, 6);
    int topN = randomIntBetween(1, 100);
    Query query = new MatchAllDocsQuery();

    MappedFieldType keywordType1 = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD1);
    MappedFieldType keywordType2 = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD2);
    MappedFieldType keywordType3 = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD3);
    MappedFieldType dateType = dateFieldType(DATE_FIELD);
    MappedFieldType ipType = new IpFieldMapper.IpFieldType(IP_FIELD);

    // Expected item sets (doc count, support) for the ten documents below, before any date exclusion.
    List<FrequentItemSet> expectedResults = List.of(
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD3, List.of("type-1")), 5, 0.5),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD2, List.of("client-1"), KEYWORD_FIELD3, List.of("type-1")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD3, List.of("type-1"), DATE_FIELD, List.of("2022-06-03")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-2"), DATE_FIELD, List.of("2022-06-02")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-2"), KEYWORD_FIELD3, List.of("type-2")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD2, List.of("client-1"), DATE_FIELD, List.of("2022-06-03")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-2"), KEYWORD_FIELD2, List.of("client-2"), DATE_FIELD, List.of("2022-06-02"), KEYWORD_FIELD3, List.of("type-3")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD2, List.of("client-1"), DATE_FIELD, List.of("2022-06-03"), KEYWORD_FIELD3, List.of("type-1")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-2"), KEYWORD_FIELD2, List.of("client-2"), KEYWORD_FIELD3, List.of("type-2")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), DATE_FIELD, List.of("2022-06-01"), KEYWORD_FIELD3, List.of("type-1")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("host-1"), KEYWORD_FIELD2, List.of("client-2"), KEYWORD_FIELD3, List.of("type-1")), 2, 0.2)
    );

    FrequentItemSetsAggregationBuilder builder = new FrequentItemSetsAggregationBuilder(
        "fi",
        fieldConfigs,
        support,
        setSize,
        topN,
        null,
        randomFrom(EXECUTION_HINT_ALLOWED_MODES)
    );

    // One row per indexed document: { host, source ip, client, type, date }.
    String[][] docs = {
        { "host-1", "192.168.0.1", "client-1", "type-1", "2022-06-03" },
        { "host-2", "192.168.0.4", "client-1", "type-2", "2022-06-03" },
        { "host-1", "192.168.0.4", "client-1", "type-1", "2022-06-01" },
        { "host-2", "192.168.0.22", "client-3", "type-2", "2022-06-01" },
        { "host-1", "192.168.0.12", "client-2", "type-1", "2022-06-03" },
        { "host-2", "192.168.0.1", "client-2", "type-3", "2022-06-02" },
        { "host-2", "192.168.0.5", "client-2", "type-3", "2022-06-02" },
        { "host-1", "192.168.0.5", "client-1", "type-1", "2022-06-03" },
        { "host-1", "192.168.0.5", "client-2", "type-1", "2022-06-01" },
        { "host-2", "192.168.0.15", "client-3", "type-2", "2022-06-02" },
    };

    testCase(iw -> {
        for (String[] doc : docs) {
            iw.addDocument(
                List.of(
                    new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef(doc[0])),
                    new SortedSetDocValuesField(IP_FIELD, encodeIp(doc[1])),
                    new SortedSetDocValuesField(KEYWORD_FIELD2, new BytesRef(doc[2])),
                    new SortedSetDocValuesField(KEYWORD_FIELD3, new BytesRef(doc[3])),
                    new SortedNumericDocValuesField(DATE_FIELD, dateFieldType(DATE_FIELD).parse(doc[4]))
                )
            );
        }
    }, (InternalItemSetMapReduceAggregation<?, ?, ?, EclatResult> results) -> {
        assertNotNull(results);
        assertResults(expectedResults, results.getMapReduceResult().getFrequentItemSets(), support, setSize, topN, excludedDate, null);
    }, new AggTestConfig(builder, keywordType1, keywordType2, keywordType3, dateType, ipType).withQuery(query));
}
233070.261253elasticsearch
/**
 * Memory-only scenarios (all job requirements use zero processors) for
 * {@code checkIfJobsCanBeMovedInLeastEfficientWay}: the return value is the extra
 * memory needed so the given candidate jobs can be moved onto the given nodes.
 */
public void testCheckIfJobsCanBeMovedInLeastEfficientWayMemoryOnly() {
    final var defaultMaxJobsPerNode = MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE;

    // One 10-byte job, node_a at 100 of 1000: fits, nothing extra needed.
    assertEquals(
        0L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L),
            Map.of("node_a", MlJobRequirements.of(100L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // One 10-byte job, node_a at 995 of 1000: does not fit, 10 extra bytes needed.
    assertEquals(
        10L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L),
            Map.of("node_a", MlJobRequirements.of(995L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // Five 10-byte jobs spread over three nodes with just enough free room.
    assertEquals(
        0L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 10L, 10L, 10L, 10L),
            Map.of("node_a", MlJobRequirements.of(976L, 0), "node_b", MlJobRequirements.of(986L, 0), "node_c", MlJobRequirements.of(967L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // Same memory layout, but a per-node job-count cap of 4 blocks one placement.
    assertEquals(
        10L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 10L, 10L, 10L, 10L),
            Map.of("node_a", MlJobRequirements.of(976L, 0, 3), "node_b", MlJobRequirements.of(986L, 0, 3), "node_c", MlJobRequirements.of(967L, 0, 2)),
            1000L,
            10,
            4
        )
    );
    assertEquals(
        0L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 10L, 10L, 10L, 10L),
            Map.of("node_a", MlJobRequirements.of(980L, 0), "node_b", MlJobRequirements.of(990L, 0), "node_c", MlJobRequirements.of(970L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // Seven 10-byte jobs: one job does not fit anywhere.
    assertEquals(
        10L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 10L, 10L, 10L, 10L, 10L, 10L),
            Map.of("node_a", MlJobRequirements.of(976L, 0), "node_b", MlJobRequirements.of(986L, 0), "node_c", MlJobRequirements.of(967L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // The single 40-byte job is the one left without a slot.
    assertEquals(
        40L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 10L, 10L, 10L, 10L, 10L, 40L),
            Map.of("node_a", MlJobRequirements.of(976L, 0), "node_b", MlJobRequirements.of(946L, 0), "node_c", MlJobRequirements.of(967L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // Mixed-size jobs against increasingly loaded nodes.
    assertEquals(
        130L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 20L, 30L, 40L, 50L, 60L, 70L),
            Map.of("node_a", MlJobRequirements.of(886L, 0), "node_b", MlJobRequirements.of(926L, 0), "node_c", MlJobRequirements.of(967L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    assertEquals(
        70L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 20L, 30L, 40L, 50L, 60L, 70L),
            Map.of("node_a", MlJobRequirements.of(886L, 0), "node_b", MlJobRequirements.of(906L, 0), "node_c", MlJobRequirements.of(917L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    assertEquals(
        70L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(10L, 20L, 30L, 40L, 50L, 60L, 70L),
            Map.of("node_a", MlJobRequirements.of(866L, 0), "node_b", MlJobRequirements.of(886L, 0), "node_c", MlJobRequirements.of(917L, 0)),
            1000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // The 500-byte job cannot be placed on either 2000-byte node.
    assertEquals(
        500L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(500L, 200L),
            Map.of("node_a", MlJobRequirements.of(1400L, 0), "node_b", MlJobRequirements.of(1700L, 0)),
            2000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // No nodes at all: everything (500 + 200) is unplaceable.
    assertEquals(
        700L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(500L, 200L),
            Collections.emptyMap(),
            2000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    // No jobs to move: always zero, with or without nodes.
    assertEquals(
        0L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(),
            Collections.emptyMap(),
            2000L,
            10,
            defaultMaxJobsPerNode
        )
    );
    assertEquals(
        0L,
        MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(
            memoryOnlyJobs(),
            Map.of("node_a", MlJobRequirements.of(1400L, 0), "node_b", MlJobRequirements.of(1700L, 0)),
            2000L,
            10,
            defaultMaxJobsPerNode
        )
    );
}

// Builds memory-only job requirements (zero processors), one entry per memory value, in order.
private static List<MlJobRequirements> memoryOnlyJobs(long... memoryBytes) {
    return java.util.Arrays.stream(memoryBytes).mapToObj(memory -> MlJobRequirements.of(memory, 0)).toList();
}
239020.691166elasticsearch
/**
 * Exercises the role built from the {@code elastic/fleet-server} service account descriptor:
 * API-key management is limited to the account's own keys, the fleet data streams are
 * write-oriented, {@code .fleet-*} system indices are read/write except the read-only
 * {@code .fleet-secrets}, and application privileges are granted only for Kibana-prefixed apps.
 */
public void testElasticFleetServerPrivileges() {
    final String allowedApplicationActionPattern = "example/custom/action/*";
    // Randomize the Kibana app suffix; only the "kibana-" prefix should matter for grants.
    final String kibanaApplication = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana");
    final Role role = Role.buildFromRoleDescriptor(ElasticServiceAccounts.ACCOUNTS.get("elastic/fleet-server").roleDescriptor(), new FieldPermissionsCache(Settings.EMPTY), RESTRICTED_INDICES, List.of(new ApplicationPrivilegeDescriptor(kibanaApplication, "reserved_fleet-setup", Set.of(allowedApplicationActionPattern), Map.of())));
    final Authentication authentication = AuthenticationTestHelper.builder().serviceAccount().build();
    // API keys: may create and manage its OWN keys, but not query or invalidate other users' keys.
    assertThat(role.cluster().check(CreateApiKeyAction.NAME, new CreateApiKeyRequest(randomAlphaOfLengthBetween(3, 8), null, null), authentication), is(true));
    assertThat(role.cluster().check(GetApiKeyAction.NAME, GetApiKeyRequest.builder().ownedByAuthenticatedUser().build(), authentication), is(true));
    assertThat(role.cluster().check(InvalidateApiKeyAction.NAME, InvalidateApiKeyRequest.forOwnedApiKeys(), authentication), is(true));
    assertThat(role.cluster().check(GetApiKeyAction.NAME, randomFrom(GetApiKeyRequest.builder().build()), authentication), is(false));
    assertThat(role.cluster().check(InvalidateApiKeyAction.NAME, InvalidateApiKeyRequest.usingUserName(randomAlphaOfLengthBetween(3, 16)), authentication), is(false));
    // Fleet data streams (logs/metrics/traces/endpoint): write, create and auto-create allowed;
    // reads, searches, index deletion and settings changes denied.
    List.of("logs-" + randomAlphaOfLengthBetween(1, 20), "metrics-" + randomAlphaOfLengthBetween(1, 20), "traces-" + randomAlphaOfLengthBetween(1, 20), ".logs-endpoint.diagnostic.collection-" + randomAlphaOfLengthBetween(1, 20), ".logs-endpoint.action.responses-" + randomAlphaOfLengthBetween(1, 20), ".logs-endpoint.heartbeat-" + randomAlphaOfLengthBetween(1, 20)).stream().map(this::mockIndexAbstraction).forEach(index -> {
        assertThat(role.indices().allowedIndicesMatcher(TransportAutoPutMappingAction.TYPE.name()).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportBulkAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(index), is(false));
        assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(index), is(false));
        assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(index), is(false));
        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(index), is(false));
        assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(index), is(false));
        assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(index), is(false));
    });
    // "profiling-*": write and read allowed, but no index creation / auto-create.
    final IndexAbstraction profilingIndex = mockIndexAbstraction("profiling-" + randomAlphaOfLengthBetween(1, 20));
    assertThat(role.indices().allowedIndicesMatcher(TransportAutoPutMappingAction.TYPE.name()).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(profilingIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(profilingIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportBulkAction.NAME).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(profilingIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(profilingIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(profilingIndex), is(false));
    // "synthetics-*": full read/write including index creation, but no delete-index or settings.
    List.of("synthetics-" + randomAlphaOfLengthBetween(1, 20)).stream().map(this::mockIndexAbstraction).forEach(index -> {
        assertThat(role.indices().allowedIndicesMatcher(TransportAutoPutMappingAction.TYPE.name()).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportBulkAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(index), is(false));
        assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(index), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(index), is(false));
    });
    // ".fleet-*" system indices: read/write and stats allowed; delete-index, settings and
    // arbitrary actions denied.
    // NOTE(review): ".fleet-fileds" below looks like a typo for ".fleet-files" — confirm
    // against the role descriptor's index patterns before relying on this case.
    List.of(".fleet-actions" + randomAlphaOfLengthBetween(1, 20), ".fleet-agents" + randomAlphaOfLengthBetween(1, 20), ".fleet-enrollment-api-keys" + randomAlphaOfLengthBetween(1, 20), ".fleet-policies" + randomAlphaOfLengthBetween(1, 20), ".fleet-policies-leader" + randomAlphaOfLengthBetween(1, 20), ".fleet-servers" + randomAlphaOfLengthBetween(1, 20), ".fleet-artifacts" + randomAlphaOfLengthBetween(1, 20), ".fleet-actions-results" + randomAlphaOfLengthBetween(1, 20), ".fleet-fileds" + randomAlphaOfLengthBetween(1, 20)).forEach(index -> {
        final IndexAbstraction dotFleetIndex = mockIndexAbstraction(index);
        assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportBulkAction.NAME).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(dotFleetIndex), is(true));
        assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(dotFleetIndex), is(false));
        assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(dotFleetIndex), is(false));
        assertThat(role.indices().allowedIndicesMatcher("indices:foo").test(dotFleetIndex), is(false));
    });
    // ".fleet-secrets*": strictly read-only — gets and searches allowed, all writes and stats denied.
    final IndexAbstraction dotFleetSecretsIndex = mockIndexAbstraction(".fleet-secrets" + randomAlphaOfLengthBetween(1, 20));
    assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(dotFleetSecretsIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(dotFleetSecretsIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(dotFleetSecretsIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportBulkAction.NAME).test(dotFleetSecretsIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(dotFleetSecretsIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(dotFleetSecretsIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(dotFleetSecretsIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(dotFleetSecretsIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(dotFleetSecretsIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(dotFleetSecretsIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(dotFleetSecretsIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher("indices:foo").test(dotFleetSecretsIndex), is(false));
    // Fleet secrets cluster actions: read-only (get allowed, post/delete denied).
    final TransportRequest request = mock(TransportRequest.class);
    assertThat(role.cluster().check("cluster:admin/fleet/secrets/get", request, authentication), is(true));
    assertThat(role.cluster().check("cluster:admin/fleet/secrets/post", request, authentication), is(false));
    assertThat(role.cluster().check("cluster:admin/fleet/secrets/delete", request, authentication), is(false));
    // "traces-apm.sampled-*": read/write plus stats, but no delete-index or settings changes.
    final IndexAbstraction apmSampledTracesIndex = mockIndexAbstraction("traces-apm.sampled-" + randomAlphaOfLengthBetween(1, 20));
    assertThat(role.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportBulkAction.NAME).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportMultiGetAction.NAME).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(IndicesStatsAction.NAME).test(apmSampledTracesIndex), is(true));
    assertThat(role.indices().allowedIndicesMatcher(TransportDeleteIndexAction.TYPE.name()).test(apmSampledTracesIndex), is(false));
    assertThat(role.indices().allowedIndicesMatcher(TransportUpdateSettingsAction.TYPE.name()).test(apmSampledTracesIndex), is(false));
    // Application privileges: granted for the Kibana app's allowed action pattern only —
    // not for other applications, and not for actions outside the allowed pattern.
    final String privilegeName = randomAlphaOfLengthBetween(3, 16);
    assertThat(role.application().grants(ApplicationPrivilegeTests.createPrivilege(kibanaApplication, privilegeName, allowedApplicationActionPattern), "*"), is(true));
    final String otherApplication = randomValueOtherThanMany(s -> s.startsWith("kibana"), () -> randomAlphaOfLengthBetween(3, 8)) + "-" + randomAlphaOfLengthBetween(8, 24);
    assertThat(role.application().grants(ApplicationPrivilegeTests.createPrivilege(otherApplication, privilegeName, allowedApplicationActionPattern), "*"), is(false));
    assertThat(role.application().grants(ApplicationPrivilegeTests.createPrivilege(kibanaApplication, privilegeName, randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 16))), "*"), is(false));
}
235575.611192elasticsearch
/**
 * DATE_TRUNC over every supported truncation unit, against a datetime, a
 * year-month interval and a day-time interval. "Etc/GMT-10" is UTC+10, so the
 * UTC instant 2019-09-03T18:10:37.123456789 renders as 2019-09-04T04:10:37... locally.
 */
public void testTruncation() {
    ZoneId tz = ZoneId.of("Etc/GMT-10");
    Literal timestamp = l(dateTime(2019, 9, 3, 18, 10, 37, 123456789));
    TemporalAmount ymAmount = Period.ofYears(2019).plusMonths(10);
    Literal yearToMonth = intervalLiteral(ymAmount, INTERVAL_YEAR_TO_MONTH);
    TemporalAmount dtAmount = Duration.ofDays(105).plusHours(2).plusMinutes(45).plusSeconds(55).plusNanos(123456789);
    Literal dayToSecond = intervalLiteral(dtAmount, INTERVAL_DAY_TO_SECOND);

    // Datetime truncation, coarse units (kept explicit: these use DateUtils.toString).
    assertEquals("2000-01-01T00:00:00.000+10:00", DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("millennia"), timestamp, tz).makePipe().asProcessor().process(null)));
    assertEquals("2000-01-01T00:00:00.000+10:00", DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("CENTURY"), timestamp, tz).makePipe().asProcessor().process(null)));
    assertEquals("2010-01-01T00:00:00.000+10:00", DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("decades"), timestamp, tz).makePipe().asProcessor().process(null)));
    assertEquals("2019-01-01T00:00:00.000+10:00", DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("years"), timestamp, tz).makePipe().asProcessor().process(null)));

    // Datetime truncation, quarter through nanosecond: { unit, expected }.
    String[][] dateTimeCases = {
        { "quarters", "2019-07-01T00:00:00.000+10:00" },
        { "month", "2019-09-01T00:00:00.000+10:00" },
        { "weeks", "2019-09-02T00:00:00.000+10:00" },
        { "days", "2019-09-04T00:00:00.000+10:00" },
        { "hh", "2019-09-04T04:00:00.000+10:00" },
        { "mi", "2019-09-04T04:10:00.000+10:00" },
        { "second", "2019-09-04T04:10:37.000+10:00" },
        { "ms", "2019-09-04T04:10:37.123+10:00" },
        { "mcs", "2019-09-04T04:10:37.123456+10:00" },
        { "nanoseconds", "2019-09-04T04:10:37.123456789+10:00" },
    };
    for (String[] testCase : dateTimeCases) {
        assertEquals(testCase[1], toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l(testCase[0]), timestamp, tz).makePipe().asProcessor().process(null)));
    }

    // Year-month interval truncation: units finer than a month leave the interval unchanged.
    String[][] yearMonthCases = {
        { "millennia", "+2000-0" },
        { "CENTURY", "+2000-0" },
        { "decades", "+2010-0" },
        { "years", "+2019-0" },
        { "quarters", "+2019-9" },
        { "month", "+2019-10" },
        { "days", "+2019-10" },
        { "hh", "+2019-10" },
        { "mi", "+2019-10" },
        { "second", "+2019-10" },
        { "ms", "+2019-10" },
        { "mcs", "+2019-10" },
        { "nanoseconds", "+2019-10" },
    };
    for (String[] testCase : yearMonthCases) {
        assertEquals(testCase[1], toString((IntervalYearMonth) new DateTrunc(Source.EMPTY, l(testCase[0]), yearToMonth, null).makePipe().asProcessor().process(null)));
    }

    // Day-time interval truncation: month and coarser zero it out; sub-millisecond
    // units keep only millisecond precision.
    String[][] dayTimeCases = {
        { "millennia", "+0 00:00:00" },
        { "CENTURY", "+0 00:00:00" },
        { "decades", "+0 00:00:00" },
        { "years", "+0 00:00:00" },
        { "quarters", "+0 00:00:00" },
        { "month", "+0 00:00:00" },
        { "days", "+105 00:00:00" },
        { "hh", "+105 02:00:00" },
        { "mi", "+105 02:45:00" },
        { "second", "+105 02:45:55" },
        { "ms", "+105 02:45:55.123" },
        { "microseconds", "+105 02:45:55.123" },
        { "nanoseconds", "+105 02:45:55.123" },
    };
    for (String[] testCase : dayTimeCases) {
        assertEquals(testCase[1], toString((IntervalDayTime) new DateTrunc(Source.EMPTY, l(testCase[0]), dayToSecond, null).makePipe().asProcessor().process(null)));
    }
}
232122.271267gwt
/**
 * Returns the full set of CLDR region (country/territory) codes for this
 * locale, ordered for display.
 * <p>
 * Generated locale data — do not hand-edit; regenerate from CLDR instead.
 * NOTE(review): ordering appears to follow this locale's collation of the
 * regions' display names (here roughly English-alphabetical) — confirm
 * against the CLDR generator before relying on it.
 */
public String[] loadSortedRegionCodes() {
    return new String[] { "AF", "AX", "AL", "DZ", "AS", "AD", "AO", "AI", "AQ", "AG", "AR", "AM", "AW", "AC", "AU", "AT", "AZ", "BS", "BH", "BD", "BB", "BY", "BE", "BZ", "BJ", "BM", "BT", "BO", "BA", "BW", "BV", "BR", "IO", "VG", "BN", "BG", "BF", "BI", "KH", "CM", "CA", "IC", "CV", "BQ", "KY", "CF", "EA", "TD", "CL", "CN", "CX", "CP", "CC", "CO", "KM", "CG", "CD", "CK", "CR", "CI", "HR", "CU", "CW", "CY", "CZ", "DK", "DG", "DJ", "DM", "DO", "EC", "EG", "SV", "GQ", "ER", "EE", "ET", "EU", "EZ", "FK", "FO", "FJ", "FI", "FR", "GF", "PF", "TF", "GA", "GM", "GE", "DE", "GH", "GI", "GR", "GL", "GD", "GP", "GU", "GT", "GG", "GN", "GW", "GY", "HT", "HM", "HN", "HK", "HU", "IS", "IN", "ID", "IR", "IQ", "IE", "IM", "IL", "IT", "JM", "JP", "JE", "JO", "KZ", "KE", "KI", "XK", "KW", "KG", "LA", "LV", "LB", "LS", "LR", "LY", "LI", "LT", "LU", "MO", "MK", "MG", "MW", "MY", "MV", "ML", "MT", "MH", "MQ", "MR", "MU", "YT", "MX", "FM", "MD", "MC", "MN", "ME", "MS", "MA", "MZ", "MM", "NA", "NR", "NP", "NL", "NC", "NZ", "NI", "NE", "NG", "NU", "NF", "MP", "KP", "NO", "OM", "QO", "PK", "PW", "PS", "PA", "PG", "PY", "PE", "PH", "PN", "PL", "PT", "XA", "XB", "PR", "QA", "RE", "RO", "RU", "RW", "WS", "SM", "ST", "SA", "SN", "RS", "SC", "SL", "SG", "SX", "SK", "SI", "SB", "SO", "ZA", "GS", "KR", "SS", "ES", "LK", "BL", "SH", "KN", "LC", "MF", "PM", "VC", "SD", "SR", "SJ", "SZ", "SE", "CH", "SY", "TW", "TJ", "TZ", "TH", "TL", "TG", "TK", "TO", "TT", "TA", "TN", "TR", "TM", "TC", "TV", "UM", "VI", "UG", "UA", "AE", "GB", "UN", "US", "UY", "UZ", "VU", "VA", "VE", "VN", "WF", "EH", "YE", "ZM", "ZW" };
}
232122.271267gwt
/**
 * Returns the full set of CLDR region (country/territory) codes for this
 * locale, ordered for display.
 * <p>
 * Generated locale data — do not hand-edit; regenerate from CLDR instead.
 * NOTE(review): the ordering differs from the other locale variants in this
 * file, so it is presumably this locale's collation of the regions' display
 * names — confirm against the CLDR generator.
 */
public String[] loadSortedRegionCodes() {
    return new String[] { "XA", "XB", "GN", "GW", "YE", "TR", "TM", "QO", "BT", "CF", "CN", "HK", "MO", "DK", "EC", "ER", "BB", "PG", "BR", "PY", "BH", "BS", "PA", "PS", "PK", "BN", "JP", "BE", "MU", "MR", "JM", "TD", "IL", "CA", "GA", "GH", "IC", "MP", "QA", "CU", "TW", "NG", "NE", "NI", "NP", "BF", "BI", "BV", "GP", "WF", "VU", "BY", "PN", "LT", "IQ", "IR", "EA", "IS", "LI", "HU", "IN", "ID", "GT", "DJ", "KG", "GY", "DO", "DM", "TG", "AI", "AO", "AG", "AD", "TK", "BM", "LA", "ES", "EH", "CV", "BZ", "CP", "HR", "LR", "LY", "GR", "SA", "GQ", "TZ", "VE", "BD", "PW", "SB", "LV", "PR", "BA", "PL", "FR", "GF", "PF", "TF", "MF", "FO", "GI", "KE", "FI", "AE", "AR", "OM", "AF", "AC", "AZ", "DZ", "AL", "AW", "BG", "RU", "ZA", "AQ", "GS", "SS", "KZ", "KH", "HN", "ZW", "BO", "KW", "CC", "CI", "XK", "KM", "TN", "US", "UM", "VI", "AS", "GB", "IO", "VG", "DG", "CR", "CO", "EG", "ET", "NO", "GG", "GD", "GL", "GE", "TH", "HT", "TT", "TC", "TA", "RE", "PE", "SO", "KI", "FM", "CZ", "VA", "MZ", "NL", "BQ", "SX", "BW", "CM", "FJ", "SJ", "LK", "SZ", "SI", "SK", "CL", "KP", "PH", "VN", "ME", "SC", "SL", "CY", "SN", "RS", "TJ", "IT", "SG", "NZ", "NC", "CH", "SE", "NR", "PT", "FK", "MN", "MS", "HM", "DE", "MA", "MD", "MC", "LB", "MX", "AU", "BL", "VC", "PM", "ST", "KN", "SH", "LC", "CX", "SM", "TL", "UG", "UA", "UY", "UZ", "AM", "GU", "GM", "CG", "CD", "RW", "LU", "SY", "TV", "AT", "AX", "CK", "CW", "KY", "EZ", "EU", "TO", "JE", "EE", "IE", "JO", "NA", "NU", "MM", "RO", "UN", "SD", "SR", "LS", "WS", "SV", "NF", "BJ", "ZM", "KR", "MT", "ML", "MK", "MW", "IM", "MQ", "MV", "MY", "YT", "MH", "MG" };
}
232122.271267gwt
/**
 * Returns the full set of CLDR region (country/territory) codes for this
 * locale, ordered for display.
 * <p>
 * Generated locale data — do not hand-edit; regenerate from CLDR instead.
 * NOTE(review): the ordering differs from the other locale variants in this
 * file, so it is presumably this locale's collation of the regions' display
 * names — confirm against the CLDR generator.
 */
public String[] loadSortedRegionCodes() {
    return new String[] { "XA", "XB", "TT", "TR", "TC", "TM", "QO", "BT", "CF", "CN", "HK", "MO", "DK", "EC", "ER", "PG", "BR", "BB", "PY", "BH", "BS", "PA", "PS", "PK", "JP", "BE", "JM", "IL", "IC", "CA", "GA", "MP", "KP", "QA", "CU", "SZ", "TW", "NE", "NI", "NP", "BF", "BV", "GP", "GT", "WF", "GM", "BY", "PN", "LT", "IQ", "IR", "EA", "IS", "LI", "HU", "ID", "IN", "DJ", "KI", "KG", "TV", "DM", "DO", "TG", "AG", "AI", "AO", "AD", "TK", "BM", "ET", "ES", "EH", "CP", "HR", "LY", "HN", "GR", "SA", "BN", "BZ", "BJ", "GQ", "ZW", "AM", "AZ", "TZ", "NG", "VE", "BD", "ZM", "PW", "LV", "TO", "TL", "BA", "BW", "PR", "PL", "FR", "FO", "GF", "TF", "PF", "MF", "GI", "KE", "FI", "AE", "AR", "OM", "AF", "AC", "DZ", "AL", "BG", "RU", "ZA", "GS", "AQ", "KR", "SS", "KZ", "KH", "TD", "BO", "CC", "KW", "XK", "TN", "JO", "US", "UM", "VI", "AS", "MR", "GB", "IO", "VG", "GH", "DG", "CG", "CD", "CO", "CR", "EG", "CK", "CW", "NO", "SJ", "GG", "GL", "GD", "TH", "HT", "UG", "UA", "UY", "UZ", "TA", "RE", "PE", "SO", "SB", "NZ", "NU", "NA", "MQ", "ML", "MY", "MK", "MW", "YT", "MH", "MG", "MT", "MV", "IM", "FM", "CZ", "SY", "VA", "MZ", "AW", "SX", "NL", "BQ", "CM", "GE", "GN", "GW", "FJ", "LK", "SK", "SI", "CL", "PH", "CI", "VN", "KY", "SN", "SC", "RS", "TJ", "AT", "AX", "EE", "IE", "SG", "NC", "SL", "CH", "SE", "VU", "IT", "BL", "VC", "PM", "ST", "KN", "SM", "SH", "CX", "LC", "YE", "KM", "PT", "FK", "CV", "MN", "MS", "ME", "BI", "GY", "HM", "LA", "DE", "MA", "MC", "MD", "MU", "EZ", "EU", "MM", "LB", "MX", "JE", "AU", "RW", "LU", "NF", "NR", "LR", "LS", "UN", "CY", "SV", "WS", "RO", "GU", "SD", "SR" };
}
233796.5232138gwt
/**
 * Parses the attributes of a UiBinder element into the code needed to
 * construct and configure the corresponding field: constructor/factory
 * arguments, {@code setX(..)} calls, and repeated add-style calls.
 *
 * @param elem      the XML element being parsed; attributes are consumed
 * @param fieldName the name of the generated field for this object
 * @param type      the declared type of the field
 * @param writer    the UiBinder writer; also used to report fatal errors
 * @throws UnableToCompleteException on any unparseable or inapplicable
 *                                   attribute (reported via writer.die)
 */
public void parse(XMLElement elem, String fieldName, JClassType type, UiBinderWriter writer) throws UnableToCompleteException {
    writer.getDesignTime().handleUIObject(writer, elem, fieldName);
    // Values to apply via setX(..) calls, keyed by property name.
    final Map<String, String> setterValues = new HashMap<>();
    final Map<String, String> localizedValues = fetchLocalizedAttributeValues(elem, writer);
    // Values to apply via repeated add-style calls, keyed by adder method name.
    final Map<String, String[]> adderValues = new HashMap<>();
    // Constructor/factory arguments resolved so far, keyed by parameter name.
    final Map<String, String> requiredValues = new HashMap<>();
    // Creator parameters that still need a matching attribute value.
    final Map<String, JType> unfilledRequiredParams = new HashMap<>();
    final OwnerFieldClass ownerFieldClass = OwnerFieldClass.getFieldClass(type, writer.getLogger(), context);
    JAbstractMethod creator = null;
    OwnerField uiField = writer.getOwnerClass().getUiField(fieldName);
    // A provided @UiField is instantiated by the owner class itself, so no
    // creator (@UiFactory method or @UiConstructor) is looked up for it.
    if ((uiField == null) || (!uiField.isProvided())) {
        creator = writer.getOwnerClass().getUiFactoryMethod(type);
        if (creator == null) {
            creator = ownerFieldClass.getUiConstructor();
        }
        if (creator != null) {
            for (JParameter param : creator.getParameters()) {
                unfilledRequiredParams.put(param.getName(), param.getType());
            }
        }
    }
    // Localized attribute values may satisfy either a String-typed creator
    // parameter or a single-argument String setter; anything else is fatal.
    for (Entry<String, String> property : localizedValues.entrySet()) {
        String key = property.getKey();
        String value = property.getValue();
        JType paramType = unfilledRequiredParams.get(key);
        if (paramType != null) {
            if (!isString(writer, paramType)) {
                writer.die(elem, "In %s, cannot apply message attribute to non-string " + "constructor argument %s.", paramType.getSimpleSourceName(), key);
            }
            requiredValues.put(key, value);
            unfilledRequiredParams.remove(key);
        } else {
            JMethod setter = ownerFieldClass.getSetter(key);
            // params is null only when setter is null; the || below
            // short-circuits before dereferencing it in that case.
            JParameter[] params = setter == null ? null : setter.getParameters();
            if (setter == null || params.length != 1 || !isString(writer, params[0].getType())) {
                writer.die(elem, "No method found to apply message attribute %s", key);
            } else {
                setterValues.put(key, value);
            }
        }
    }
    // Iterate backwards so consuming an attribute does not disturb the
    // indices of the attributes not yet visited.
    for (int i = elem.getAttributeCount() - 1; i >= 0; i--) {
        XMLAttribute attribute = elem.getAttribute(i);
        if (attribute.getName().startsWith("xmlns:")) {
            continue;
        }
        String propertyName = attribute.getLocalName();
        if (setterValues.containsKey(propertyName) || requiredValues.containsKey(propertyName)) {
            writer.die(elem, "Duplicate attribute name: %s", propertyName);
        }
        if (unfilledRequiredParams.containsKey(propertyName)) {
            JType paramType = unfilledRequiredParams.get(propertyName);
            String value = elem.consumeAttributeWithDefault(attribute.getName(), null, paramType);
            if (value == null) {
                writer.die(elem, "Unable to parse %s as constructor argument " + "of type %s", attribute, paramType.getSimpleSourceName());
            }
            requiredValues.put(propertyName, value);
            unfilledRequiredParams.remove(propertyName);
        } else {
            JMethod setter = ownerFieldClass.getSetter(propertyName);
            if (setter != null) {
                String n = attribute.getName();
                String value = elem.consumeAttributeWithDefault(n, null, getParamTypes(setter));
                if (value == null) {
                    writer.die(elem, "Unable to parse %s.", attribute);
                }
                setterValues.put(propertyName, value);
            } else if (ADD_PROPERTY_TO_SETTER_MAP.containsKey(propertyName)) {
                // Multi-valued attribute applied through an add-style method
                // that must take exactly one String argument.
                String addMethod = ADD_PROPERTY_TO_SETTER_MAP.get(propertyName);
                JType stringType = writer.getOracle().findType(String.class.getName());
                if (ownerFieldClass.getRawType().findMethod(addMethod, new JType[] { stringType }) != null) {
                    String n = attribute.getName();
                    String[] value = elem.consumeStringArrayAttribute(n);
                    if (value == null) {
                        writer.die(elem, "Unable to parse %s.", attribute);
                    }
                    adderValues.put(addMethod, value);
                } else {
                    writer.die(elem, "Class %s has no appropriate %s() method", elem.getLocalName(), addMethod);
                }
            } else {
                writer.die(elem, "Class %s has no appropriate set%s() method", elem.getLocalName(), initialCap(propertyName));
            }
        }
    }
    if (!unfilledRequiredParams.isEmpty()) {
        StringBuilder b = new StringBuilder(String.format("%s missing required attribute(s):", elem));
        for (String name : unfilledRequiredParams.keySet()) {
            b.append(" ").append(name);
        }
        // NOTE(review): b may contain '%' from attribute names; confirm that
        // die() treats a lone message argument as literal text, not a format.
        writer.die(elem, b.toString());
    }
    // Emit the field initializer: either owner.factoryMethod(args) or a
    // constructor invocation with the collected @UiConstructor arguments.
    if (creator != null) {
        String[] args = makeArgsList(requiredValues, creator);
        if (creator instanceof JMethod) {
            JMethod factoryMethod = (JMethod) creator;
            String initializer;
            if (writer.getDesignTime().isDesignTime()) {
                String typeName = factoryMethod.getReturnType().getQualifiedSourceName();
                initializer = writer.getDesignTime().getProvidedFactory(typeName, factoryMethod.getName(), UiBinderWriter.asCommaSeparatedList(args));
            } else {
                initializer = String.format("owner.%s(%s)", factoryMethod.getName(), UiBinderWriter.asCommaSeparatedList(args));
            }
            writer.setFieldInitializer(fieldName, initializer);
        } else {
            writer.setFieldInitializerAsConstructor(fieldName, args);
        }
    }
    // Apply collected single-valued properties.
    for (Map.Entry<String, String> entry : setterValues.entrySet()) {
        String propertyName = entry.getKey();
        String value = entry.getValue();
        writer.addStatement("%s.set%s(%s);", fieldName, initialCap(propertyName), value);
    }
    // Apply collected multi-valued properties, one statement per value.
    for (Map.Entry<String, String[]> entry : adderValues.entrySet()) {
        String addMethodName = entry.getKey();
        for (String s : entry.getValue()) {
            writer.addStatement("%s.%s(%s);", fieldName, addMethodName, s);
        }
    }
}
233869.3242129hadoop
/**
 * Command-line entry point: parses the arguments, then starts the requested
 * number of (optionally simulated) DataNodes that connect to the NameNode
 * named in the configuration, optionally injecting blocks into each
 * simulated DataNode.
 *
 * Fix: {@code startingBlockId} is a {@code long} but was parsed with
 * {@code Integer.parseInt}, which throws for block IDs beyond the int
 * range; it is now parsed with {@code Long.parseLong}.
 *
 * @param args command-line flags; see printUsageExit for the accepted set
 * @throws InterruptedException if the post-startup sleep is interrupted
 */
public static void main(String[] args) throws InterruptedException {
    int numDataNodes = 0;
    int numRacks = 0;
    boolean inject = false;
    long startingBlockId = 1;
    int numBlocksPerDNtoInject = 0;
    int replication = 1;
    boolean checkDataNodeAddrConfig = false;
    long simulatedCapacityPerDn = SimulatedFSDataset.DEFAULT_CAPACITY;
    String bpid = null;
    Configuration conf = new HdfsConfiguration();
    // Each flag consumes its value by advancing i; a following token that
    // starts with '-' is treated as a missing value.
    for (int i = 0; i < args.length; i++) {
        if (args[i].equals("-n")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("missing number of nodes");
            }
            numDataNodes = Integer.parseInt(args[i]);
        } else if (args[i].equals("-racks")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing number of racks");
            }
            numRacks = Integer.parseInt(args[i]);
        } else if (args[i].equals("-r")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing replication factor");
            }
            replication = Integer.parseInt(args[i]);
        } else if (args[i].equals("-d")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing datanode dirs parameter");
            }
            dataNodeDirs = args[i];
        } else if (args[i].equals("-simulated")) {
            SimulatedFSDataset.setFactory(conf);
            // Optional capacity value: only consumed if the next token
            // exists and is not another flag.
            if ((i + 1) < args.length && !args[i + 1].startsWith("-")) {
                simulatedCapacityPerDn = Long.parseLong(args[++i]);
            }
        } else if (args[i].equals("-bpid")) {
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing blockpoolid parameter");
            }
            bpid = args[i];
        } else if (args[i].equals("-inject")) {
            if (!FsDatasetSpi.Factory.getFactory(conf).isSimulated()) {
                System.out.print("-inject is valid only for simulated");
                printUsageExit();
            }
            inject = true;
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing starting block and number of blocks per DN to inject");
            }
            // Block IDs are longs; parse as long so IDs beyond the int
            // range are accepted (was Integer.parseInt).
            startingBlockId = Long.parseLong(args[i]);
            if (++i >= args.length || args[i].startsWith("-")) {
                printUsageExit("Missing number of blocks to inject");
            }
            numBlocksPerDNtoInject = Integer.parseInt(args[i]);
        } else if (args[i].equals("-checkDataNodeAddrConfig")) {
            checkDataNodeAddrConfig = true;
        } else {
            printUsageExit();
        }
    }
    // Validate the combination of parsed arguments before doing any work.
    if (numDataNodes <= 0 || replication <= 0) {
        printUsageExit("numDataNodes and replication have to be greater than zero");
    }
    if (replication > numDataNodes) {
        printUsageExit("Replication must be less than or equal to numDataNodes");
    }
    if (bpid == null) {
        printUsageExit("BlockPoolId must be provided");
    }
    String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority();
    if (nameNodeAdr == null) {
        System.out.println("No name node address and port in config");
        System.exit(-1);
    }
    boolean simulated = FsDatasetSpi.Factory.getFactory(conf).isSimulated();
    System.out.println("Starting " + numDataNodes + (simulated ? " Simulated " : " ") + " Data Nodes that will connect to Name Node at " + nameNodeAdr);
    System.setProperty("test.build.data", dataNodeDirs);
    // Every DataNode gets the same simulated capacity.
    long[] simulatedCapacities = new long[numDataNodes];
    for (int i = 0; i < numDataNodes; ++i) {
        simulatedCapacities[i] = simulatedCapacityPerDn;
    }
    MiniDFSCluster mc = new MiniDFSCluster();
    try {
        mc.formatDataNodeDirs();
    } catch (IOException e) {
        System.out.println("Error formatting data node dirs:" + e);
    }
    // Optionally spread the DataNodes round-robin across numRacks racks
    // under a unique rack-name prefix.
    String[] rack4DataNode = null;
    if (numRacks > 0) {
        System.out.println("Using " + numRacks + " racks: ");
        String rackPrefix = getUniqueRackPrefix();
        rack4DataNode = new String[numDataNodes];
        for (int i = 0; i < numDataNodes; ++i) {
            rack4DataNode[i] = rackPrefix + "-" + i % numRacks;
            System.out.println("Data Node " + i + " using " + rack4DataNode[i]);
        }
    }
    try {
        mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR, rack4DataNode, null, simulatedCapacities, false, checkDataNodeAddrConfig);
        // Give the DataNodes a moment to register with the NameNode.
        Thread.sleep(10 * 1000);
        if (inject) {
            long blockSize = 10;
            System.out.println("Injecting " + numBlocksPerDNtoInject + " blocks in each DN starting at blockId " + startingBlockId + " with blocksize of " + blockSize);
            Block[] blocks = new Block[numBlocksPerDNtoInject];
            long blkid = startingBlockId;
            for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
                for (int i = 0; i < blocks.length; ++i) {
                    blocks[i] = new Block(blkid++, blockSize, CreateEditsLog.BLOCK_GENERATION_STAMP);
                }
                // Inject each DN's blocks into `replication` consecutive
                // DataNodes (wrapping) to simulate replicated storage.
                for (int i = 1; i <= replication; ++i) {
                    mc.injectBlocks((i_dn + i - 1) % numDataNodes, Arrays.asList(blocks), bpid);
                    System.out.println("Injecting blocks of dn " + i_dn + " into dn" + ((i_dn + i - 1) % numDataNodes));
                }
            }
            System.out.println("Created blocks from Bids " + startingBlockId + " to " + (blkid - 1));
        }
    } catch (IOException e) {
        System.out.println("Error creating data node:" + e);
    }
}
234172.036137hadoop
/**
 * Convenience overload: starts {@code numDataNodes} DataNodes by delegating
 * to the node-group-aware variant with the {@code NODE_GROUPS} default.
 * <p>
 * NOTE(review): {@code dnConfOverlays}, {@code dnHttpPorts} and
 * {@code dnIpcPorts} are accepted but never forwarded to the delegate —
 * confirm whether the delegate should receive them.
 *
 * @throws IOException if the delegate fails to start the DataNodes
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts, long[][] storageCapacities, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays, int[] dnHttpPorts, int[] dnIpcPorts) throws IOException {
    startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks, NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
}
234172.036137hadoop
/**
 * Convenience overload: starts {@code numDataNodes} DataNodes by delegating
 * to the node-group-aware variant with the {@code NODE_GROUPS} default.
 * <p>
 * NOTE(review): {@code dnConfOverlays}, {@code dnHttpPorts} and
 * {@code dnIpcPorts} are accepted but never forwarded to the delegate —
 * confirm whether the delegate should receive them.
 *
 * @throws IOException if the delegate fails to start the DataNodes
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts, long[][] storageCapacities, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays, int[] dnHttpPorts, int[] dnIpcPorts) throws IOException {
    startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks, NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
}
234172.036137hadoop
/**
 * Convenience overload: starts {@code numDataNodes} DataNodes by delegating
 * to the node-group-aware variant with the {@code NODE_GROUPS} default.
 * <p>
 * NOTE(review): {@code dnConfOverlays}, {@code dnHttpPorts} and
 * {@code dnIpcPorts} are accepted but never forwarded to the delegate —
 * confirm whether the delegate should receive them.
 *
 * @throws IOException if the delegate fails to start the DataNodes
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts, long[][] storageCapacities, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays, int[] dnHttpPorts, int[] dnIpcPorts) throws IOException {
    startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks, NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
}
234172.036137hadoop
/**
 * Convenience overload: starts {@code numDataNodes} DataNodes by delegating
 * to the node-group-aware variant with the {@code NODE_GROUPS} default.
 * <p>
 * NOTE(review): {@code dnConfOverlays}, {@code dnHttpPorts} and
 * {@code dnIpcPorts} are accepted but never forwarded to the delegate —
 * confirm whether the delegate should receive them.
 *
 * @throws IOException if the delegate fails to start the DataNodes
 */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts, long[][] storageCapacities, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays, int[] dnHttpPorts, int[] dnIpcPorts) throws IOException {
    startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks, NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig);
}
236368.251186hadoop
/**
 * Exercises renaming a snapshotted directory (foo) and file (bar) across
 * three snapshottable directories — /dir1 -> /dir2 -> /dir3, then back to
 * /dir2 and finally back to /dir1 — taking fresh snapshots and changing
 * replication at every stop. Verifies that each snapshot preserves the
 * replication value in effect when it was taken, that INodeReference
 * counts and diff lists reflect the rename chain, and that the fsimage
 * survives a cluster restart at every stage.
 * Assertions are order-sensitive; statement order must not be changed.
 */
public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
    // Set up three snapshottable dirs; foo/bar1 and bar start under dir1.
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path sdir3 = new Path("/dir3");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    hdfs.mkdirs(sdir3);
    final Path foo_dir1 = new Path(sdir1, "foo");
    final Path bar1_dir1 = new Path(foo_dir1, "bar1");
    final Path bar_dir1 = new Path(sdir1, "bar");
    DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
    DFSTestUtil.createFile(hdfs, bar_dir1, BLOCKSIZE, REPL, SEED);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");
    // Rename foo and bar from dir1 into dir2, then lower replication.
    final Path foo_dir2 = new Path(sdir2, "foo");
    hdfs.rename(foo_dir1, foo_dir2);
    final Path bar_dir2 = new Path(sdir2, "bar");
    hdfs.rename(bar_dir1, bar_dir2);
    final Path bar1_dir2 = new Path(foo_dir2, "bar1");
    hdfs.setReplication(bar1_dir2, REPL_1);
    hdfs.setReplication(bar_dir2, REPL_1);
    restartClusterAndCheckImage(true);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s22");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s33");
    // Rename again from dir2 into dir3, then raise replication to REPL_2.
    final Path foo_dir3 = new Path(sdir3, "foo");
    hdfs.rename(foo_dir2, foo_dir3);
    final Path bar_dir3 = new Path(sdir3, "bar");
    hdfs.rename(bar_dir2, bar_dir3);
    final Path bar1_dir3 = new Path(foo_dir3, "bar1");
    hdfs.setReplication(bar1_dir3, REPL_2);
    hdfs.setReplication(bar_dir3, REPL_2);
    restartClusterAndCheckImage(true);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s222");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s333");
    // Each snapshot must show the file at the location and replication it
    // had when the snapshot was taken.
    final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo/bar1");
    final Path bar1_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22", "foo/bar1");
    final Path bar1_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333", "foo/bar1");
    final Path bar_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "bar");
    final Path bar_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22", "bar");
    final Path bar_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar1_s22));
    assertTrue(hdfs.exists(bar1_s333));
    assertTrue(hdfs.exists(bar_s1));
    assertTrue(hdfs.exists(bar_s22));
    assertTrue(hdfs.exists(bar_s333));
    FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir3);
    assertEquals(REPL_2, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s22);
    assertEquals(REPL_1, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s333);
    assertEquals(REPL_2, statusBar1.getReplication());
    FileStatus statusBar = hdfs.getFileStatus(bar_s1);
    assertEquals(REPL, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_dir3);
    assertEquals(REPL_2, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s22);
    assertEquals(REPL_1, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s333);
    assertEquals(REPL_2, statusBar.getReplication());
    // Rename back from dir3 into dir2 and restore the original replication.
    hdfs.rename(foo_dir3, foo_dir2);
    hdfs.rename(bar_dir3, bar_dir2);
    hdfs.setReplication(bar1_dir2, REPL);
    hdfs.setReplication(bar_dir2, REPL);
    restartClusterAndCheckImage(true);
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2222");
    // Re-verify all earlier snapshots plus the new s2222 snapshot.
    final Path bar1_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "foo/bar1");
    final Path bar_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar1_s22));
    assertTrue(hdfs.exists(bar1_s333));
    assertTrue(hdfs.exists(bar1_s2222));
    assertTrue(hdfs.exists(bar_s1));
    assertTrue(hdfs.exists(bar_s22));
    assertTrue(hdfs.exists(bar_s333));
    assertTrue(hdfs.exists(bar_s2222));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir2);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s22);
    assertEquals(REPL_1, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s333);
    assertEquals(REPL_2, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_s2222);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar = hdfs.getFileStatus(bar_s1);
    assertEquals(REPL, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_dir2);
    assertEquals(REPL, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s22);
    assertEquals(REPL_1, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s333);
    assertEquals(REPL_2, statusBar.getReplication());
    statusBar = hdfs.getFileStatus(bar_s2222);
    assertEquals(REPL, statusBar.getReplication());
    // Final rename back to dir1, then inspect the namespace internals:
    // both foo and bar should be references with count 5 (live copy plus
    // the four snapshots s1, s22, s333, s2222 that captured them).
    hdfs.rename(foo_dir2, foo_dir1);
    hdfs.rename(bar_dir2, bar_dir1);
    INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
    INodeDirectory sdir3Node = fsdir.getINode(sdir3.toString()).asDirectory();
    INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString()).asReference();
    INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
    assertEquals(5, fooWithCount.getReferenceCount());
    INodeDirectory foo = fooWithCount.asDirectory();
    DiffList<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
    assertEquals(4, fooDiffs.size());
    // Diff lists are ordered oldest-first: s1, s22, s333, s2222.
    Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
    Snapshot s333 = sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
    Snapshot s22 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
    Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
    assertEquals(s333.getId(), fooDiffs.get(2).getSnapshotId());
    assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId());
    assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
    // bar1 never moved relative to foo, so it has only three file diffs.
    INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
    DiffList<FileDiff> bar1Diffs = bar1.getDiffs().asList();
    assertEquals(3, bar1Diffs.size());
    assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
    assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId());
    assertEquals(s1.getId(), bar1Diffs.get(0).getSnapshotId());
    INodeReference barRef = fsdir.getINode4Write(bar_dir1.toString()).asReference();
    INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
    assertEquals(5, barWithCount.getReferenceCount());
    INodeFile bar = barWithCount.asFile();
    DiffList<FileDiff> barDiffs = bar.getDiffs().asList();
    assertEquals(4, barDiffs.size());
    assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
    assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId());
    assertEquals(s22.getId(), barDiffs.get(1).getSnapshotId());
    assertEquals(s1.getId(), barDiffs.get(0).getSnapshotId());
    restartClusterAndCheckImage(true);
    // Delete the live copies; snapshot copies must survive, reference
    // counts drop to 4, and s1111 (taken after foo/bar left dir1) must
    // not contain them.
    hdfs.delete(foo_dir1, true);
    hdfs.delete(bar_dir1, true);
    restartClusterAndCheckImage(true);
    final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111", "foo/bar1");
    final Path bar_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar1_s22));
    assertTrue(hdfs.exists(bar1_s333));
    assertTrue(hdfs.exists(bar1_s2222));
    assertFalse(hdfs.exists(bar1_s1111));
    assertTrue(hdfs.exists(bar_s1));
    assertTrue(hdfs.exists(bar_s22));
    assertTrue(hdfs.exists(bar_s333));
    assertTrue(hdfs.exists(bar_s2222));
    assertFalse(hdfs.exists(bar_s1111));
    final Path foo_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222", "foo");
    fooRef = fsdir.getINode(foo_s2222.toString()).asReference();
    fooWithCount = (WithCount) fooRef.getReferredINode();
    assertEquals(4, fooWithCount.getReferenceCount());
    foo = fooWithCount.asDirectory();
    fooDiffs = foo.getDiffs().asList();
    assertEquals(4, fooDiffs.size());
    assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
    bar1Diffs = bar1.getDiffs().asList();
    assertEquals(3, bar1Diffs.size());
    assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
    barRef = fsdir.getINode(bar_s2222.toString()).asReference();
    barWithCount = (WithCount) barRef.getReferredINode();
    assertEquals(4, barWithCount.getReferenceCount());
    bar = barWithCount.asFile();
    barDiffs = bar.getDiffs().asList();
    assertEquals(4, barDiffs.size());
    assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
}
234234.3413180hadoop
/**
 * Exercises the full cache-directive lifecycle: add, list (with various
 * filters), modify, and remove — including pool-permission enforcement,
 * argument validation, duplicate adds, and the behaviour of a closed
 * filesystem handle.
 *
 * @throws Exception on any unexpected filesystem or RPC error
 */
public void testAddRemoveDirectives() throws Exception {
    // Pools with varying permissions: 0777 (world-writable), 0 (nobody),
    // and 0007 (others only — notably NOT the owner).
    proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0777)));
    proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short) 0777)));
    proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short) 0777)));
    proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short) 0)));
    proto.addCachePool(new CachePoolInfo("pool5").setMode(new FsPermission((short) 0007)).setOwnerName(unprivilegedUser.getShortUserName()));
    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
    CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder().setPath(new Path("/beta")).setPool("pool2").build();
    CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder().setPath(new Path("/delta")).setPool("pool1").build();
    // Re-adding an identical directive must yield a distinct ID.
    long alphaId = addAsUnprivileged(alpha);
    long alphaId2 = addAsUnprivileged(alpha);
    assertFalse("Expected to get unique directives when re-adding an " + "existing CacheDirectiveInfo", alphaId == alphaId2);
    long betaId = addAsUnprivileged(beta);
    // Adding to a pool that does not exist must be rejected.
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/unicorn")).setPool("no_such_pool").build());
        fail("expected an error when adding to a non-existent pool.");
    } catch (InvalidRequestException ioe) {
        GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
    }
    // Adding to a pool with mode 0 must be denied for everyone.
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/blackhole")).setPool("pool4").build());
        fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone).");
    } catch (AccessControlException e) {
        GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
    }
    // Malformed DFS paths are rejected up front.
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/illegal:path/")).setPool("pool1").build());
        fail("expected an error when adding a malformed path " + "to the cache directives.");
    } catch (IllegalArgumentException e) {
        GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
    }
    // An empty pool name is rejected.
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/emptypoolname")).setReplication((short) 1).setPool("").build());
        fail("expected an error when adding a cache " + "directive with an empty pool name.");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
    }
    long deltaId = addAsUnprivileged(delta);
    // Mode 0007 grants nothing to the pool owner, so the owner is denied.
    try {
        addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/epsilon")).setPool("pool5").build());
        fail("expected an error when adding to a pool with " + "mode 007 (no permissions for pool owner).");
    } catch (AccessControlException e) {
        GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
    }
    // Relative paths are accepted (resolved against the working directory).
    long relativeId = addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("relative")).setPool("pool1").build());
    // Listing: null filter returns everything; pool/ID filters narrow it.
    RemoteIterator<CacheDirectiveEntry> iter;
    iter = dfs.listCacheDirectives(null);
    validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
    assertFalse(iter.hasNext());
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
    validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
    validateListAll(iter, betaId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
    validateListAll(iter, alphaId2);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
    validateListAll(iter, relativeId);
    dfs.removeCacheDirective(betaId);
    iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
    assertFalse(iter.hasNext());
    // Removing an already-removed, negative, or never-assigned ID all fail
    // with InvalidRequestException.
    try {
        dfs.removeCacheDirective(betaId);
        fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }
    try {
        // Use an uppercase 'L' suffix — lowercase 'l' is easily misread as '1'.
        proto.removeCacheDirective(-42L);
        fail("expected an error when removing a negative ID");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("Invalid negative ID", e);
    }
    try {
        proto.removeCacheDirective(43L);
        fail("expected an error when removing a non-existent ID");
    } catch (InvalidRequestException e) {
        GenericTestUtils.assertExceptionContains("No directive with ID", e);
    }
    dfs.removeCacheDirective(alphaId);
    dfs.removeCacheDirective(alphaId2);
    dfs.removeCacheDirective(deltaId);
    // Modify the remaining directive and verify the change is visible.
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(relativeId).setReplication((short) 555).build());
    iter = dfs.listCacheDirectives(null);
    assertTrue(iter.hasNext());
    CacheDirectiveInfo modified = iter.next().getInfo();
    assertEquals(relativeId, modified.getId().longValue());
    assertEquals((short) 555, modified.getReplication().shortValue());
    dfs.removeCacheDirective(relativeId);
    iter = dfs.listCacheDirectives(null);
    assertFalse(iter.hasNext());
    // A "." path is also valid; run it through the whole lifecycle.
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder().setPath(new Path(".")).setPool("pool1").build();
    long id = dfs.addCacheDirective(directive);
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive).setId(id).setReplication((short) 2).build());
    dfs.removeCacheDirective(id);
    // Every directive operation on a closed filesystem must raise
    // "Filesystem closed".
    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster.getNewFileSystemInstance(0);
    dfs1.close();
    try {
        dfs1.listCacheDirectives(null);
        fail("listCacheDirectives using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.addCacheDirective(alpha);
        fail("addCacheDirective using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.modifyCacheDirective(alpha);
        fail("modifyCacheDirective using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.removeCacheDirective(alphaId);
        fail("removeCacheDirective using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
}
234700.088189hadoop
/**
 * Tests the {@code dfsadmin} quota commands ({@code -setQuota},
 * {@code -setSpaceQuota}, {@code -clrQuota}, {@code -clrSpaceQuota}):
 * successful settings, quota enforcement on mkdir/create/write, invalid
 * arguments, non-existent and file targets, permission checks for a
 * non-admin user, and quotas on the root directory.
 *
 * Fix over previous revision: assertEquals arguments were swapped in many
 * places (JUnit expects {@code (expected, actual)}), which produced
 * misleading failure messages; also the unexpected-success path of the
 * quota-violating create no longer leaks the stream.
 *
 * @throws Exception on any unexpected filesystem error
 */
public void testQuotaCommands() throws Exception {
    DFSAdmin admin = new DFSAdmin(conf);
    final Path dir = new Path(PathUtils.getTestDir(getClass()).getPath(), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(dir));
    final int fileLen = 1024;
    final short replication = 5;
    // Space quota large enough for ~1.875 replicated files of fileLen bytes.
    final long spaceQuota = fileLen * replication * 15 / 8;
    final Path parent = new Path(dir, "test");
    assertTrue(dfs.mkdirs(parent));
    String[] args;
    // Namespace quota accepts human-readable sizes; last setting (3) wins.
    args = new String[] { "-setQuota", "3K", parent.toString() };
    runCommand(admin, args, false);
    args = new String[] { "-setQuota", "3m", parent.toString() };
    runCommand(admin, args, false);
    args = new String[] { "-setQuota", "3", parent.toString() };
    runCommand(admin, args, false);
    // "2t" parses as 2 TiB.
    runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
    assertEquals(2L << 40, dfs.getContentSummary(parent).getSpaceQuota());
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota), parent.toString());
    // Fill the namespace quota: parent + childDir0 + childFile0 = 3 entries.
    final Path childDir0 = new Path(parent, "data0");
    assertTrue(dfs.mkdirs(childDir0));
    final Path childFile0 = new Path(parent, "datafile0");
    DFSTestUtil.createFile(dfs, childFile0, fileLen, replication, 0);
    ContentSummary c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(3, c.getFileCount() + c.getDirectoryCount());
    assertEquals(3, c.getQuota());
    assertEquals(fileLen * replication, c.getSpaceConsumed());
    assertEquals(spaceQuota, c.getSpaceQuota());
    c = dfs.getContentSummary(childDir0);
    compareQuotaUsage(c, dfs, childDir0);
    assertEquals(1, c.getFileCount() + c.getDirectoryCount());
    // -1 means "no quota set" on the child directory itself.
    assertEquals(-1, c.getQuota());
    c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(fileLen * replication, c.getSpaceConsumed());
    // Namespace quota (3) is exhausted — further mkdir/create must fail.
    final Path childDir1 = new Path(parent, "data1");
    boolean hasException = false;
    try {
        assertFalse(dfs.mkdirs(childDir1));
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    OutputStream fout;
    final Path childFile1 = new Path(parent, "datafile1");
    hasException = false;
    try {
        fout = dfs.create(childFile1);
        // Unexpected success: close the stream so it does not leak before
        // the assertTrue below fails the test.
        IOUtils.closeStream(fout);
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // Clearing the namespace quota leaves the space quota intact.
    runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
    c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(-1, c.getQuota());
    assertEquals(spaceQuota, c.getSpaceQuota());
    runCommand(admin, new String[] { "-clrQuota", childDir0.toString() }, false);
    c = dfs.getContentSummary(childDir0);
    compareQuotaUsage(c, dfs, childDir0);
    assertEquals(-1, c.getQuota());
    // The space quota is still in force: writing fileLen*replication more
    // bytes must exceed it at close time.
    fout = dfs.create(childFile1, replication);
    try {
        fout.write(new byte[fileLen]);
        fout.close();
        Assert.fail();
    } catch (QuotaExceededException e) {
        IOUtils.closeStream(fout);
    }
    dfs.delete(childFile1, false);
    runCommand(admin, false, "-clrSpaceQuota", parent.toString());
    c = dfs.getContentSummary(parent);
    compareQuotaUsage(c, dfs, parent);
    assertEquals(-1, c.getQuota());
    assertEquals(-1, c.getSpaceQuota());
    DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
    // Setting a quota below current usage succeeds; enforcement is on new ops.
    args = new String[] { "-setQuota", "1", parent.toString() };
    runCommand(admin, args, false);
    runCommand(admin, false, "-setSpaceQuota", Integer.toString(fileLen), args[2]);
    args = new String[] { "-setQuota", "1", childDir0.toString() };
    runCommand(admin, args, false);
    hasException = false;
    try {
        assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    c = dfs.getContentSummary(childDir0);
    compareQuotaUsage(c, dfs, childDir0);
    assertEquals(1, c.getDirectoryCount() + c.getFileCount());
    assertEquals(1, c.getQuota());
    // Quota commands on a non-existent path must fail.
    Path nonExistentPath = new Path(dir, "test1");
    assertFalse(dfs.exists(nonExistentPath));
    try {
        compareQuotaUsage(null, dfs, nonExistentPath);
        fail("Expected FileNotFoundException");
    } catch (FileNotFoundException fnfe) {
        GenericTestUtils.assertExceptionContains("File/Directory does not exist: " + nonExistentPath, fnfe);
    }
    args = new String[] { "-setQuota", "1", nonExistentPath.toString() };
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.toString());
    // Quota commands on a file (not a directory) must fail.
    assertTrue(dfs.isFile(childFile0));
    args[1] = childFile0.toString();
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
    args[0] = "-clrQuota";
    runCommand(admin, args, true);
    runCommand(admin, true, "-clrSpaceQuota", args[1]);
    args[1] = nonExistentPath.toString();
    runCommand(admin, args, true);
    runCommand(admin, true, "-clrSpaceQuota", args[1]);
    // Invalid quota values: 0 namespace quota, negatives, overflow, garbage.
    args = new String[] { "-setQuota", "0", parent.toString() };
    runCommand(admin, args, true);
    runCommand(admin, false, "-setSpaceQuota", "0", args[2]);
    args[1] = "-1";
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
    args[1] = String.valueOf(Long.MAX_VALUE + 1L);
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
    args[1] = "33aa1.5";
    runCommand(admin, args, true);
    runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
    // A value that overflows once the "m" multiplier is applied.
    runCommand(admin, true, "-setSpaceQuota", (Long.MAX_VALUE / 1024 / 1024 + 1024) + "m", args[2]);
    // A non-superuser must be denied all quota operations.
    final String username = "userxx";
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, new String[] { "groupyy" });
    final String[] args2 = args.clone();
    ugi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            assertEquals("Not running as new user", username, UserGroupInformation.getCurrentUser().getShortUserName());
            DFSAdmin userAdmin = new DFSAdmin(conf);
            args2[1] = "100";
            runCommand(userAdmin, args2, true);
            runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
            String[] args3 = new String[] { "-clrQuota", parent.toString() };
            runCommand(userAdmin, args3, true);
            runCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);
            return null;
        }
    });
    // Quotas on the root directory are allowed for the admin.
    runCommand(admin, false, "-clrQuota", "/");
    runCommand(admin, false, "-setQuota", "1000000", "/");
    runCommand(admin, false, "-clrQuota", "/");
    runCommand(admin, false, "-clrSpaceQuota", "/");
    runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
    runCommand(admin, false, "-clrSpaceQuota", parent.toString());
    // Space quota set then cleared: a second file of fileLen2 must violate
    // the re-applied quota of one replicated block.
    final Path childDir2 = new Path(parent, "data2");
    assertTrue(dfs.mkdirs(childDir2));
    final Path childFile2 = new Path(childDir2, "datafile2");
    final Path childFile3 = new Path(childDir2, "datafile3");
    final long spaceQuota2 = DEFAULT_BLOCK_SIZE * replication;
    final long fileLen2 = DEFAULT_BLOCK_SIZE;
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
    runCommand(admin, false, "-clrSpaceQuota", childDir2.toString());
    DFSTestUtil.createFile(dfs, childFile2, fileLen2, replication, 0);
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
    hasException = false;
    try {
        DFSTestUtil.createFile(dfs, childFile3, fileLen2, replication, 0);
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // The same pattern applied to the root directory.
    final Path childFile4 = new Path(dir, "datafile2");
    final Path childFile5 = new Path(dir, "datafile3");
    runCommand(admin, false, "-clrQuota", "/");
    runCommand(admin, false, "-clrSpaceQuota", "/");
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
    runCommand(admin, false, "-clrSpaceQuota", "/");
    DFSTestUtil.createFile(dfs, childFile4, fileLen2, replication, 0);
    runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
    hasException = false;
    try {
        DFSTestUtil.createFile(dfs, childFile5, fileLen2, replication, 0);
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // NOTE(review): the expected yield count of 5 presumably reflects the
    // quota-command traversals above — confirm if commands are added/removed.
    assertEquals(5, cluster.getNamesystem().getFSDirectory().getYieldCount());
    runCommand(admin, false, "-clrSpaceQuota", "/");
}
233915.9836139hadoop
/**
 * Recursively parses one {@code <queue>} (or legacy {@code <pool>}) element
 * of the fair-scheduler allocation file and records its properties into
 * {@code builder}.
 *
 * Fix over previous revision: nested queue elements were matched with
 * {@code QUEUE.endsWith(tagName)}, which also accepted any suffix of
 * "queue" (e.g. a stray {@code <ue>} element) as a child queue; the match
 * is now an exact {@code equals} comparison, consistent with every other
 * tag comparison in this method.
 *
 * @param parentName fully-qualified name of the parent queue, or null for a
 *        top-level queue
 * @param element the DOM element describing the queue
 * @param builder accumulator for all parsed queue properties
 * @throws AllocationConfigurationException if the queue name is invalid, or
 *         a parent queue carries leaf-only settings (reservation,
 *         maxAMShare)
 */
private void loadQueue(String parentName, Element element, QueueProperties.Builder builder) throws AllocationConfigurationException {
    String queueName = FairSchedulerUtilities.trimQueueName(element.getAttribute("name"));
    // Periods are the hierarchy separator, so a single name may not contain one.
    if (queueName.contains(".")) {
        throw new AllocationConfigurationException("Bad fair scheduler config " + "file: queue name (" + queueName + ") shouldn't contain period.");
    }
    if (queueName.isEmpty()) {
        throw new AllocationConfigurationException("Bad fair scheduler config " + "file: queue name shouldn't be empty or " + "consist only of whitespace.");
    }
    // Build the fully-qualified name: parent.child.
    if (parentName != null) {
        queueName = parentName + "." + queueName;
    }
    NodeList fields = element.getChildNodes();
    boolean isLeaf = true;
    boolean isReservable = false;
    boolean isMaxAMShareSet = false;
    for (int j = 0; j < fields.getLength(); j++) {
        Node fieldNode = fields.item(j);
        // Skip text/comment nodes between elements.
        if (!(fieldNode instanceof Element)) {
            continue;
        }
        Element field = (Element) fieldNode;
        if (MIN_RESOURCES.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
            builder.minQueueResources(queueName, val.getResource());
        } else if (MAX_RESOURCES.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
            builder.maxQueueResources(queueName, val);
        } else if (MAX_CHILD_RESOURCES.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
            builder.maxChildQueueResources(queueName, val);
        } else if (MAX_RUNNING_APPS.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            int val = Integer.parseInt(text);
            builder.queueMaxApps(queueName, val);
        } else if (MAX_AMSHARE.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            float val = Float.parseFloat(text);
            // AM share is a fraction; clamp to at most 1.0.
            val = Math.min(val, 1.0f);
            builder.queueMaxAMShares(queueName, val);
            isMaxAMShareSet = true;
        } else if (MAX_CONTAINER_ALLOCATION.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            ConfigurableResource val = FairSchedulerConfiguration.parseResourceConfigValue(text);
            builder.queueMaxContainerAllocation(queueName, val.getResource());
        } else if (WEIGHT.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            double val = Double.parseDouble(text);
            builder.queueWeights(queueName, (float) val);
        } else if (MIN_SHARE_PREEMPTION_TIMEOUT.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            // Config is in seconds; stored in milliseconds.
            long val = Long.parseLong(text) * 1000L;
            builder.minSharePreemptionTimeouts(queueName, val);
        } else if (FAIR_SHARE_PREEMPTION_TIMEOUT.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            long val = Long.parseLong(text) * 1000L;
            builder.fairSharePreemptionTimeouts(queueName, val);
        } else if (FAIR_SHARE_PREEMPTION_THRESHOLD.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            float val = Float.parseFloat(text);
            // Threshold is a fraction; clamp to [0.0, 1.0].
            val = Math.max(Math.min(val, 1.0f), 0.0f);
            builder.fairSharePreemptionThresholds(queueName, val);
        } else if (SCHEDULING_POLICY.equals(field.getTagName()) || SCHEDULING_MODE.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            SchedulingPolicy policy = SchedulingPolicy.parse(text);
            builder.queuePolicies(queueName, policy);
        } else if (ACL_SUBMIT_APPS.equals(field.getTagName())) {
            // ACL text is deliberately NOT trimmed: leading/trailing
            // whitespace is significant to AccessControlList parsing.
            String text = ((Text) field.getFirstChild()).getData();
            builder.queueAcls(queueName, AccessType.SUBMIT_APP, new AccessControlList(text));
        } else if (ACL_ADMINISTER_APPS.equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            builder.queueAcls(queueName, AccessType.ADMINISTER_QUEUE, new AccessControlList(text));
        } else if (ACL_ADMINISTER_RESERVATIONS.equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            builder.reservationAcls(queueName, ReservationACL.ADMINISTER_RESERVATIONS, new AccessControlList(text));
        } else if (ACL_LIST_RESERVATIONS.equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            builder.reservationAcls(queueName, ReservationACL.LIST_RESERVATIONS, new AccessControlList(text));
        } else if (ACL_SUBMIT_RESERVATIONS.equals(field.getTagName())) {
            String text = ((Text) field.getFirstChild()).getData();
            builder.reservationAcls(queueName, ReservationACL.SUBMIT_RESERVATIONS, new AccessControlList(text));
        } else if (RESERVATION.equals(field.getTagName())) {
            isReservable = true;
            builder.reservableQueues(queueName);
            builder.configuredQueues(FSQueueType.PARENT, queueName);
        } else if (ALLOW_PREEMPTION_FROM.equals(field.getTagName())) {
            String text = getTrimmedTextData(field);
            if (!Boolean.parseBoolean(text)) {
                builder.nonPreemptableQueues(queueName);
            }
        } else if (QUEUE.equals(field.getTagName()) || POOL.equals(field.getTagName())) {
            // A nested queue element makes this a parent queue; recurse.
            loadQueue(queueName, field, builder);
            isLeaf = false;
        }
    }
    // Classify as leaf or parent; type="parent" forces parent even with no
    // children. Reservation and maxAMShare are only legal on leaves.
    if (isLeaf && !"parent".equals(element.getAttribute("type"))) {
        if (!isReservable) {
            builder.configuredQueues(FSQueueType.LEAF, queueName);
        }
    } else {
        if (isReservable) {
            throw new AllocationConfigurationException(getErrorString(queueName, RESERVATION));
        } else if (isMaxAMShareSet) {
            throw new AllocationConfigurationException(getErrorString(queueName, MAX_AMSHARE));
        }
        builder.configuredQueues(FSQueueType.PARENT, queueName);
    }
    // Default ACLs: root is open to everybody, all other queues to nobody,
    // unless an explicit ACL was supplied above.
    for (QueueACL acl : QueueACL.values()) {
        AccessType accessType = SchedulerUtils.toAccessType(acl);
        if (!builder.isAclDefinedForAccessType(queueName, accessType)) {
            AccessControlList defaultAcl = queueName.equals(ROOT) ? EVERYBODY_ACL : NOBODY_ACL;
            builder.queueAcls(queueName, accessType, defaultAcl);
        }
    }
    checkMinAndMaxResource(builder.getMinQueueResources(), builder.getMaxQueueResources(), queueName);
}
236702.575175hadoop
// Renders the ResourceManager overview metrics page: cluster metrics,
// cluster node counts, optional per-user metrics, and scheduler metrics,
// each as a Hamlet-generated HTML table inside a single "metrics" div.
protected void render(Block html) {
    html.style(".metrics {margin-bottom:5px}");
    ClusterMetricsInfo clusterMetrics = new ClusterMetricsInfo(this.rm);
    DIV<Hamlet> div = html.div().$class("metrics");
    Resource usedResources;
    Resource totalResources;
    Resource reservedResources;
    int allocatedContainers;
    if (clusterMetrics.getCrossPartitionMetricsAvailable()) {
        // Partitioned (node-label) cluster: aggregate across all partitions.
        allocatedContainers = clusterMetrics.getTotalAllocatedContainersAcrossPartition();
        usedResources = clusterMetrics.getTotalUsedResourcesAcrossPartition().getResource();
        totalResources = clusterMetrics.getTotalClusterResourcesAcrossPartition().getResource();
        reservedResources = clusterMetrics.getTotalReservedResourcesAcrossPartition().getResource();
        // NOTE(review): reserved is subtracted from used only in this branch
        // — presumably the cross-partition "used" figure includes reserved;
        // confirm against ClusterMetricsInfo.
        Resources.subtractFrom(usedResources, reservedResources);
    } else {
        // Non-partitioned cluster: build Resource objects from flat counters.
        allocatedContainers = clusterMetrics.getContainersAllocated();
        usedResources = Resource.newInstance(clusterMetrics.getAllocatedMB(), (int) clusterMetrics.getAllocatedVirtualCores());
        totalResources = Resource.newInstance(clusterMetrics.getTotalMB(), (int) clusterMetrics.getTotalVirtualCores());
        reservedResources = Resource.newInstance(clusterMetrics.getReservedMB(), (int) clusterMetrics.getReservedVirtualCores());
    }
    // "Cluster Metrics" table: app counts, container count, and resource usage.
    // "Apps Completed" column folds completed + failed + killed into one number.
    div.h3("Cluster Metrics").table("#metricsoverview").thead().$class("ui-widget-header").tr().th().$class("ui-state-default").__("Apps Submitted").__().th().$class("ui-state-default").__("Apps Pending").__().th().$class("ui-state-default").__("Apps Running").__().th().$class("ui-state-default").__("Apps Completed").__().th().$class("ui-state-default").__("Containers Running").__().th().$class("ui-state-default").__("Used Resources").__().th().$class("ui-state-default").__("Total Resources").__().th().$class("ui-state-default").__("Reserved Resources").__().th().$class("ui-state-default").__("Physical Mem Used %").__().th().$class("ui-state-default").__("Physical VCores Used %").__().__().__().tbody().$class("ui-widget-content").tr().td(String.valueOf(clusterMetrics.getAppsSubmitted())).td(String.valueOf(clusterMetrics.getAppsPending())).td(String.valueOf(clusterMetrics.getAppsRunning())).td(String.valueOf(clusterMetrics.getAppsCompleted() + clusterMetrics.getAppsFailed() + clusterMetrics.getAppsKilled())).td(String.valueOf(allocatedContainers)).td(usedResources.getFormattedString()).td(totalResources.getFormattedString()).td(reservedResources.getFormattedString()).td(String.valueOf(clusterMetrics.getUtilizedMBPercent())).td(String.valueOf(clusterMetrics.getUtilizedVirtualCoresPercent())).__().__().__();
    // "Cluster Nodes Metrics" table: each node count links to the matching
    // nodes page (e.g. nodes/decommissioned).
    div.h3("Cluster Nodes Metrics").table("#nodemetricsoverview").thead().$class("ui-widget-header").tr().th().$class("ui-state-default").__("Active Nodes").__().th().$class("ui-state-default").__("Decommissioning Nodes").__().th().$class("ui-state-default").__("Decommissioned Nodes").__().th().$class("ui-state-default").__("Lost Nodes").__().th().$class("ui-state-default").__("Unhealthy Nodes").__().th().$class("ui-state-default").__("Rebooted Nodes").__().th().$class("ui-state-default").__("Shutdown Nodes").__().__().__().tbody().$class("ui-widget-content").tr().td().a(url("nodes"), String.valueOf(clusterMetrics.getActiveNodes())).__().td().a(url("nodes/decommissioning"), String.valueOf(clusterMetrics.getDecommissioningNodes())).__().td().a(url("nodes/decommissioned"), String.valueOf(clusterMetrics.getDecommissionedNodes())).__().td().a(url("nodes/lost"), String.valueOf(clusterMetrics.getLostNodes())).__().td().a(url("nodes/unhealthy"), String.valueOf(clusterMetrics.getUnhealthyNodes())).__().td().a(url("nodes/rebooted"), String.valueOf(clusterMetrics.getRebootedNodes())).__().td().a(url("nodes/shutdown"), String.valueOf(clusterMetrics.getShutdownNodes())).__().__().__().__();
    // Per-user metrics table, shown only for an authenticated remote user
    // whose metrics are available.
    String user = request().getRemoteUser();
    if (user != null) {
        UserMetricsInfo userMetrics = new UserMetricsInfo(this.rm, user);
        if (userMetrics.metricsAvailable()) {
            div.h3("User Metrics for " + user).table("#usermetricsoverview").thead().$class("ui-widget-header").tr().th().$class("ui-state-default").__("Apps Submitted").__().th().$class("ui-state-default").__("Apps Pending").__().th().$class("ui-state-default").__("Apps Running").__().th().$class("ui-state-default").__("Apps Completed").__().th().$class("ui-state-default").__("Containers Running").__().th().$class("ui-state-default").__("Containers Pending").__().th().$class("ui-state-default").__("Containers Reserved").__().th().$class("ui-state-default").__("Memory Used").__().th().$class("ui-state-default").__("Memory Pending").__().th().$class("ui-state-default").__("Memory Reserved").__().th().$class("ui-state-default").__("VCores Used").__().th().$class("ui-state-default").__("VCores Pending").__().th().$class("ui-state-default").__("VCores Reserved").__().__().__().tbody().$class("ui-widget-content").tr().td(String.valueOf(userMetrics.getAppsSubmitted())).td(String.valueOf(userMetrics.getAppsPending())).td(String.valueOf(userMetrics.getAppsRunning())).td(String.valueOf((userMetrics.getAppsCompleted() + userMetrics.getAppsFailed() + userMetrics.getAppsKilled()))).td(String.valueOf(userMetrics.getRunningContainers())).td(String.valueOf(userMetrics.getPendingContainers())).td(String.valueOf(userMetrics.getReservedContainers())).td(StringUtils.byteDesc(userMetrics.getAllocatedMB() * BYTES_IN_MB)).td(StringUtils.byteDesc(userMetrics.getPendingMB() * BYTES_IN_MB)).td(StringUtils.byteDesc(userMetrics.getReservedMB() * BYTES_IN_MB)).td(String.valueOf(userMetrics.getAllocatedVirtualCores())).td(String.valueOf(userMetrics.getPendingVirtualCores())).td(String.valueOf(userMetrics.getReservedVirtualCores())).__().__().__();
        }
    }
    // "Scheduler Metrics" table: scheduler type, allocation bounds, and
    // dispatcher health. A busy-percent of -1 means the value is unavailable.
    SchedulerInfo schedulerInfo = new SchedulerInfo(this.rm);
    int schedBusy = clusterMetrics.getRmSchedulerBusyPercent();
    int rmEventQueueSize = clusterMetrics.getRmEventQueueSize();
    int schedulerEventQueueSize = clusterMetrics.getSchedulerEventQueueSize();
    div.h3("Scheduler Metrics").table("#schedulermetricsoverview").thead().$class("ui-widget-header").tr().th().$class("ui-state-default").__("Scheduler Type").__().th().$class("ui-state-default").__("Scheduling Resource Type").__().th().$class("ui-state-default").__("Minimum Allocation").__().th().$class("ui-state-default").__("Maximum Allocation").__().th().$class("ui-state-default").__("Maximum Cluster Application Priority").__().th().$class("ui-state-default").__("Scheduler Busy %").__().th().$class("ui-state-default").__("RM Dispatcher EventQueue Size").__().th().$class("ui-state-default").__("Scheduler Dispatcher EventQueue Size").__().__().__().tbody().$class("ui-widget-content").tr().td(String.valueOf(schedulerInfo.getSchedulerType())).td(String.valueOf(Arrays.toString(ResourceUtils.getResourcesTypeInfo().toArray(new ResourceTypeInfo[0])))).td(schedulerInfo.getMinAllocation().toString()).td(schedulerInfo.getMaxAllocation().toString()).td(String.valueOf(schedulerInfo.getMaxClusterLevelAppPriority())).td(schedBusy == -1 ? UNAVAILABLE : String.valueOf(schedBusy)).td(String.valueOf(rmEventQueueSize)).td(String.valueOf(schedulerEventQueueSize)).__().__().__();
    // Close the enclosing "metrics" div.
    div.__();
}
235094.6815170hadoop
/**
 * Exercises {@code reader.getEntities} with a variety of relatesTo filter
 * lists: OR/AND combinations, NOT_EQUAL filters, multi-value filters and
 * nested filter lists. Only the first query asks for {@code Field.ALL}, so
 * only that query is expected to see non-empty relatesTo maps on the
 * returned entities; the remaining queries use a default
 * {@code TimelineDataToRetrieve} and expect a relatesTo count of 0.
 */
public void testReadEntitiesRelatesTo() throws Exception {
    // OR of two single-value EQUAL filters; Field.ALL so relatesTo data is populated.
    TimelineFilterList rt = new TimelineFilterList(Operator.OR);
    rt.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7"))));
    rt.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    Set<TimelineEntity> entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(2, entities.size());
    assertEquals(3, verifyIdsAndCountRelatesTo(entities, "hello", "hello2"));
    // AND (default operator) of an EQUAL and a NOT_EQUAL filter on the same key.
    TimelineFilterList rt1 = new TimelineFilterList();
    rt1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    rt1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto3"))));
    entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt1).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    assertEquals(0, verifyIdsAndCountRelatesTo(entities, "hello1"));
    // Same OR filter as the first query but without Field.ALL: same entities,
    // no relatesTo data retrieved.
    TimelineFilterList rt2 = new TimelineFilterList(Operator.OR);
    rt2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7"))));
    rt2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt2).build(), new TimelineDataToRetrieve());
    assertEquals(2, entities.size());
    assertEquals(0, verifyIdsAndCountRelatesTo(entities, "hello", "hello2"));
    // Single EQUAL filter whose value set contains two required values.
    TimelineFilterList rt3 = new TimelineFilterList();
    rt3.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1", "relatesto3"))));
    entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt3).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    assertEquals(0, verifyIdsAndCountRelatesTo(entities, "hello"));
    // AND of a matching filter and a filter on a non-existent key: no match.
    TimelineFilterList rt4 = new TimelineFilterList();
    rt4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    rt4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_container", new HashSet<Object>(Arrays.asList("relatesto5"))));
    entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt4).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // EQUAL filter requiring a value that no entity has: no match.
    TimelineFilterList rt5 = new TimelineFilterList();
    rt5.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatedto1", "relatesto8"))));
    entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt5).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // OR of two nested AND lists: the first list cannot match (dummy key) but
    // the second one can.
    TimelineFilterList list1 = new TimelineFilterList();
    list1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container2", new HashSet<Object>(Arrays.asList("relatesto7"))));
    list1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_container", new HashSet<Object>(Arrays.asList("relatesto4"))));
    TimelineFilterList list2 = new TimelineFilterList();
    list2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    TimelineFilterList rt6 = new TimelineFilterList(Operator.OR, list1, list2);
    entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt6).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    assertEquals(0, verifyIdsAndCountRelatesTo(entities, "hello"));
    // AND of an OR-combined pair of lists and a NOT_EQUAL filter.
    TimelineFilterList list3 = new TimelineFilterList();
    list3.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    list3.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container1", new HashSet<Object>(Arrays.asList("relatesto4"))));
    TimelineFilterList list4 = new TimelineFilterList();
    list4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto1"))));
    list4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto2"))));
    TimelineFilterList combinedList = new TimelineFilterList(Operator.OR, list3, list4);
    TimelineFilterList rt7 = new TimelineFilterList(Operator.AND, combinedList, new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "container", new HashSet<Object>(Arrays.asList("relatesto3"))));
    entities = reader.getEntities(newRelatesToQueryContext(), new TimelineEntityFilters.Builder().relatesTo(rt7).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    assertEquals(0, verifyIdsAndCountRelatesTo(entities, "hello1"));
}

/** Builds the reader context shared by every query in this test. */
private static TimelineReaderContext newRelatesToQueryContext() {
    return new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null);
}

/**
 * Fails unless every returned entity id is one of {@code expectedIds};
 * returns the total number of relatesTo relations carried by the entities.
 */
private static int verifyIdsAndCountRelatesTo(Set<TimelineEntity> entities, String... expectedIds) {
    Set<String> ids = new HashSet<>(Arrays.asList(expectedIds));
    int relatesToCnt = 0;
    for (TimelineEntity timelineEntity : entities) {
        relatesToCnt += timelineEntity.getRelatesToEntities().size();
        if (!ids.contains(timelineEntity.getId())) {
            Assert.fail("Entity id should have been one of " + ids + " but was " + timelineEntity.getId());
        }
    }
    return relatesToCnt;
}
235512.391210kafka
/**
 * Verifies the online-upgrade path (migration policy UPGRADE): while a classic
 * group is in PREPARING_REBALANCE, a ConsumerGroupHeartbeat from a new member
 * converts the group to a consumer group — the classic group metadata is
 * tombstoned, the existing members are re-created with their classic protocol
 * metadata, the pending classic join completes with REBALANCE_IN_PROGRESS —
 * and rolling back the uncommitted records restores the classic group.
 */
public void testConsumerGroupHeartbeatWithPreparingRebalanceClassicGroup() throws Exception {
    String groupId = "group-id";
    String memberId1 = "member-id-1";
    String memberId2 = "member-id-2";
    String memberId3 = "member-id-3";
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    // Target assignment the mock assignor hands out once the group is converted.
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(barTopicId, 0))));
            put(memberId3, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 1))));
        }
    }));
    MetadataImage metadataImage = new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 2).addTopic(barTopicId, barTopicName, 1).addRacks().build();
    // UPGRADE policy permits converting the classic group on heartbeat.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE).withAssignors(Collections.singletonList(assignor)).withMetadataImage(metadataImage).build();
    // Classic "range" protocol subscriptions (including owned partitions) for
    // the two pre-existing classic members.
    JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols1 = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1);
    protocols1.add(new JoinGroupRequestData.JoinGroupRequestProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)))))));
    JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols2 = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1);
    protocols2.add(new JoinGroupRequestData.JoinGroupRequestProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(barTopicName, 0)))))));
    // Serialized classic assignments for the two members.
    Map<String, byte[]> assignments = new HashMap<String, byte[]>() {

        {
            put(memberId1, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1))))));
            put(memberId2, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(barTopicName, 0))))));
        }
    };
    // Build a STABLE classic group with two members and persist its group
    // metadata record.
    ClassicGroup group = context.createClassicGroup(groupId);
    group.setProtocolName(Optional.ofNullable("range"));
    group.add(new ClassicGroupMember(memberId1, Optional.empty(), "client-id", "client-host", 10000, 5000, "consumer", protocols1, assignments.get(memberId1)));
    group.add(new ClassicGroupMember(memberId2, Optional.empty(), "client-id", "client-host", 10000, 5000, "consumer", protocols2, assignments.get(memberId2)));
    group.transitionTo(PREPARING_REBALANCE);
    group.transitionTo(COMPLETING_REBALANCE);
    group.transitionTo(STABLE);
    context.replay(CoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion()));
    context.commit();
    group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
    // Member 1 re-joins, which moves the classic group to PREPARING_REBALANCE.
    GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId("group-id").withMemberId(memberId1).withProtocols(protocols1).withSessionTimeoutMs(5000).withRebalanceTimeoutMs(10000).build());
    assertTrue(group.isInState(PREPARING_REBALANCE));
    // A new member's ConsumerGroupHeartbeat arrives mid-rebalance and triggers
    // the upgrade to a consumer group.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeatResult = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setRebalanceTimeoutMs(5000).setServerAssignor("range").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setTopicPartitions(Collections.emptyList()));
    // Converted members 1 and 2 keep epoch 0 and retain their classic member
    // metadata; the new member 3 joins at epoch 1 in UNRELEASED_PARTITIONS.
    ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1).setMemberEpoch(0).setPreviousMemberEpoch(0).setClientId("client-id").setClientHost("client-host").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(10000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(protocols1))).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1))).build();
    ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(memberId2).setMemberEpoch(0).setPreviousMemberEpoch(0).setClientId("client-id").setClientHost("client-host").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(10000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(protocols2))).setAssignedPartitions(mkAssignment(mkTopicAssignment(barTopicId, 0))).build();
    ConsumerGroupMember expectedMember3 = new ConsumerGroupMember.Builder(memberId3).setMemberEpoch(1).setPreviousMemberEpoch(0).setState(MemberState.UNRELEASED_PARTITIONS).setClientId("client").setClientHost("localhost/127.0.0.1").setServerAssignorName("range").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(5000).setAssignedPartitions(Collections.emptyMap()).build();
    // Expected conversion records: tombstone the classic group metadata, then
    // re-create the group as a consumer group (member subscriptions, target
    // assignments, current assignments at epoch 0) and finally bump it to
    // epoch 1 when member 3 is admitted.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember2), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 0), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, expectedMember1.assignedPartitions()), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, expectedMember2.assignedPartitions()), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 0), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember2), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember3), CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, new HashMap<Integer, Set<String>>() {

                {
                    put(0, new HashSet<>(Arrays.asList("rack0", "rack1")));
                    put(1, new HashSet<>(Arrays.asList("rack1", "rack2")));
                }
            }));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, new HashMap<Integer, Set<String>>() {

                {
                    put(0, new HashSet<>(Arrays.asList("rack0", "rack1")));
                }
            }));
        }
    }), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 1), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, assignor.targetPartitions(memberId1)), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId3, assignor.targetPartitions(memberId3)), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 1), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember3));
    assertRecordsEquals(expectedRecords, consumerGroupHeartbeatResult.records());
    // The blocked classic join is completed with REBALANCE_IN_PROGRESS.
    assertTrue(joinResult.joinFuture.isDone());
    assertEquals(Errors.REBALANCE_IN_PROGRESS.code(), joinResult.joinFuture.get().errorCode());
    // All three members get a session timeout of 45000 ms scheduled.
    context.assertSessionTimeout(groupId, memberId1, 45000);
    context.assertSessionTimeout(groupId, memberId2, 45000);
    context.assertSessionTimeout(groupId, memberId3, 45000);
    // Rolling back the uncommitted conversion records must restore the
    // original classic group.
    context.rollback();
    assertEquals(group, context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false));
}
235428.531204kafka
/**
 * Tests reconciliation for a consumer-group member that participates through
 * the classic protocol with eager semantics (it reports no owned partitions
 * when joining). Member 1 re-joins with an expanded subscription (adds zar),
 * bumping the group to epoch 11 and landing in UNRELEASED_PARTITIONS because
 * foo-1 is still held by member 2; after member 2 heartbeats its revocation,
 * a second join moves member 1 to STABLE with its full target assignment.
 */
public void testReconciliationInJoiningConsumerGroupWithEagerProtocol() throws Exception {
    String groupId = "group-id";
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    Uuid zarTopicId = Uuid.randomUuid();
    String zarTopicName = "zar";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Consumer group at epoch 10: member 1 (classic-protocol metadata) owns
    // foo-0 and bar-0; member 2 owns foo-1.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 2).addTopic(barTopicId, barTopicName, 1).addTopic(zarTopicId, zarTopicName, 1).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, mkMapOfPartitionRacks(1)));
        }
    }).withMember(new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)))))).build()).withMember(new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 1))).build()).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 1))).withAssignmentEpoch(10)).build();
    ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId);
    group.setMetadataRefreshDeadline(Long.MAX_VALUE, 11);
    // Target assignment after the re-join: foo-1 moves from member 2 to
    // member 1, zar-0 goes to member 1, bar-0 moves to member 2.
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(zarTopicId, 0))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(barTopicId, 0))));
        }
    }));
    // Member 1 re-joins with an expanded subscription and an empty owned
    // partition list (eager protocol).
    JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId(groupId).withMemberId(memberId1).withSessionTimeoutMs(5000).withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Collections.emptyList())).build();
    GroupMetadataManagerTestContext.JoinResult joinResult1 = context.sendClassicGroupJoin(request);
    // Expected state after the first join: epoch 11, UNRELEASED_PARTITIONS,
    // assigned only foo-0 and zar-0 (foo-1 not yet released by member 2).
    ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1).setMemberEpoch(11).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setState(MemberState.UNRELEASED_PARTITIONS).setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName, zarTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(zarTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(request.sessionTimeoutMs()).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Collections.emptyList())))).build();
    List<CoordinatorRecord> expectedRecords1 = Arrays.asList(CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, mkMapOfPartitionRacks(1)));
            put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1, mkMapOfPartitionRacks(1)));
        }
    }), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 11), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(zarTopicId, 0))), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, mkAssignment(mkTopicAssignment(barTopicId, 0))), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 11), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember1));
    // The first three and last two records are order-sensitive; the two
    // target-assignment records may appear in either order.
    assertEquals(expectedRecords1.size(), joinResult1.records.size());
    assertRecordsEquals(expectedRecords1.subList(0, 3), joinResult1.records.subList(0, 3));
    assertUnorderedListEquals(expectedRecords1.subList(3, 5), joinResult1.records.subList(3, 5));
    assertRecordsEquals(expectedRecords1.subList(5, 7), joinResult1.records.subList(5, 7));
    assertEquals(expectedMember1.state(), group.getOrMaybeCreateMember(memberId1, false).state());
    // Completing the log append releases the join response.
    joinResult1.appendFuture.complete(null);
    JoinGroupResponseData joinResponse1 = joinResult1.joinFuture.get();
    assertEquals(new JoinGroupResponseData().setMemberId(memberId1).setGenerationId(11).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setProtocolName("range"), joinResponse1);
    context.assertSessionTimeout(groupId, memberId1, request.sessionTimeoutMs());
    context.assertSyncTimeout(groupId, memberId1, request.rebalanceTimeoutMs());
    // The classic SyncGroup must hand back only the currently released
    // partitions: foo-0 and zar-0.
    context.verifyClassicGroupSyncToConsumerGroup(groupId, joinResponse1.memberId(), joinResponse1.generationId(), joinResponse1.protocolName(), joinResponse1.protocolType(), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(zarTopicName, 0)));
    // Member 2 heartbeats at epoch 10 with no owned partitions, releasing
    // foo-1 for member 1.
    context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(10).setTopicPartitions(Collections.emptyList()));
    // Member 1 joins again and now reaches STABLE with the full target
    // assignment (foo-0, foo-1, zar-0).
    GroupMetadataManagerTestContext.JoinResult joinResult2 = context.sendClassicGroupJoin(request);
    ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(expectedMember1).setState(MemberState.STABLE).setPreviousMemberEpoch(11).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(zarTopicId, 0))).build();
    assertRecordsEquals(Collections.singletonList(CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember2)), joinResult2.records);
    assertEquals(expectedMember2.state(), group.getOrMaybeCreateMember(memberId1, false).state());
    joinResult2.appendFuture.complete(null);
    JoinGroupResponseData joinResponse2 = joinResult2.joinFuture.get();
    assertEquals(new JoinGroupResponseData().setMemberId(memberId1).setGenerationId(11).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setProtocolName("range"), joinResponse2);
    context.assertSessionTimeout(groupId, memberId1, request.sessionTimeoutMs());
    context.assertSyncTimeout(groupId, memberId1, request.rebalanceTimeoutMs());
    context.verifyClassicGroupSyncToConsumerGroup(groupId, joinResponse2.memberId(), joinResponse2.generationId(), joinResponse2.protocolName(), joinResponse2.protocolType(), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(zarTopicName, 0)));
}
232721.4134163wildfly
/**
 * Parses a single datasource element (schema version 1.0) and appends the
 * resulting management operations to {@code list}: one ADD operation for the
 * datasource itself, followed by one ADD operation per nested
 * connection-property element.
 *
 * @param reader        stream reader positioned at the datasource start element
 * @param list          receives the datasource ADD operation and any
 *                      connection-property ADD operations
 * @param parentAddress address of the parent resource; the datasource address
 *                      is built by appending (DATA_SOURCE =&gt; poolName) to a
 *                      clone of it
 * @throws XMLStreamException on underlying XML stream errors
 * @throws ParserException    on unexpected elements/attributes or a premature
 *                            end of document
 * @throws ValidateException  if attribute/element validation fails
 */
private void parseDataSource_1_0(final XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    // First pass: consume the element's attributes into the ADD operation.
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final DataSource.Attribute attribute = DataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // pool-name becomes part of the operation address rather
                    // than an operation parameter.
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JTA:
                {
                    final String value = rawAttributeText(reader, JTA.getXmlName());
                    if (value != null) {
                        JTA.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                throw ParseUtils.unexpectedAttribute(reader, i);
        }
    }
    // Datasource address: parent + (DATA_SOURCE => poolName); protect() makes
    // the address node immutable.
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(DATA_SOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // Connection-property ADD operations are collected here and appended to
    // the result list after the main datasource ADD operation.
    List<ModelNode> configPropertiesOperations = new ArrayList<ModelNode>(0);
    // Second pass: walk the child elements until the closing datasource tag.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.DATASOURCE) {
                        // Closing </datasource>: emit the collected operations
                        // and return.
                        list.add(operation);
                        list.addAll(configPropertiesOperations);
                        return;
                    } else {
                        if (DataSource.Tag.forName(reader.getLocalName()) == DataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(DataSource.Tag.forName(reader.getLocalName())) {
                        case CONNECTION_PROPERTY:
                            {
                                // Each connection-property becomes its own ADD
                                // operation addressed under the datasource.
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(CONNECTION_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                CONNECTION_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                configPropertiesOperations.add(configOperation);
                                break;
                            }
                        case CONNECTION_URL:
                            {
                                String value = rawElementText(reader);
                                CONNECTION_URL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER_CLASS:
                            {
                                String value = rawElementText(reader);
                                DRIVER_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case POOL:
                            {
                                // Nested settings blocks are delegated to
                                // dedicated parsers that fill 'operation'.
                                parsePool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                parseDsSecurity(reader, operation);
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // Ran out of input before seeing the closing datasource tag.
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
233421.8937140wildfly
/**
 * Translates parsed TLD metadata into the {@link TagLibraryInfo} structure consumed by the
 * JSP container, and registers the result in {@code ret}.
 *
 * The info is always registered under its URI (first writer wins). When the TLD came from the
 * web application itself it is additionally registered under its web-app relative path; when it
 * came from a jar under {@code /WEB-INF/lib} it is registered under the jar path only for the
 * conventional {@code META-INF/taglib.tld} entry.
 *
 * @param location    location the TLD was loaded from, e.g. {@code /WEB-INF/lib/x.jar/META-INF/a.tld};
 *                    may be {@code null}
 * @param tldMetaData parsed TLD contents
 * @param ret         accumulating registry keyed by URI and by location
 * @return the populated {@link TagLibraryInfo}
 */
private static TagLibraryInfo createTldInfo(final String location, final TldMetaData tldMetaData, final HashMap<String, TagLibraryInfo> ret) {
    String libraryLocation = location;
    String entryPath = null;
    // Split "/WEB-INF/lib/some.jar/META-INF/foo.tld" into the jar location and the entry path
    // inside the jar (with its leading slash stripped).
    if (libraryLocation != null && libraryLocation.startsWith("/WEB-INF/lib/")) {
        final int separator = libraryLocation.indexOf('/', "/WEB-INF/lib/".length());
        if (separator > 0) {
            entryPath = libraryLocation.substring(separator);
            if (entryPath.startsWith("/")) {
                entryPath = entryPath.substring(1);
            }
            libraryLocation = libraryLocation.substring(0, separator);
        }
    }
    final TagLibraryInfo info = new TagLibraryInfo();
    // Listeners declared by the tag library.
    if (tldMetaData.getListeners() != null) {
        for (final ListenerMetaData listener : tldMetaData.getListeners()) {
            info.addListener(listener.getListenerClass());
        }
    }
    info.setTlibversion(tldMetaData.getTlibVersion());
    // Fall back to the schema version when no explicit JSP version is declared.
    info.setJspversion(tldMetaData.getJspVersion() != null ? tldMetaData.getJspVersion() : tldMetaData.getVersion());
    info.setShortname(tldMetaData.getShortName());
    info.setUri(tldMetaData.getUri());
    if (tldMetaData.getDescriptionGroup() != null) {
        info.setInfo(tldMetaData.getDescriptionGroup().getDescription());
    }
    // Optional TagLibraryValidator with its init params.
    if (tldMetaData.getValidator() != null) {
        final TagLibraryValidatorInfo validatorInfo = new TagLibraryValidatorInfo();
        validatorInfo.setValidatorClass(tldMetaData.getValidator().getValidatorClass());
        if (tldMetaData.getValidator().getInitParams() != null) {
            for (final ParamValueMetaData param : tldMetaData.getValidator().getInitParams()) {
                validatorInfo.addInitParam(param.getParamName(), param.getParamValue());
            }
        }
        info.setValidator(validatorInfo);
    }
    // Tag declarations, including per-tag variables and attributes.
    if (tldMetaData.getTags() != null) {
        for (final TagMetaData tagMetaData : tldMetaData.getTags()) {
            final TagInfo tag = new TagInfo();
            tag.setTagName(tagMetaData.getName());
            tag.setTagClassName(tagMetaData.getTagClass());
            tag.setTagExtraInfo(tagMetaData.getTeiClass());
            if (tagMetaData.getBodyContent() != null) {
                tag.setBodyContent(tagMetaData.getBodyContent().toString());
            }
            tag.setDynamicAttributes(tagMetaData.getDynamicAttributes());
            if (tagMetaData.getDescriptionGroup() != null) {
                final DescriptionGroupMetaData group = tagMetaData.getDescriptionGroup();
                // Only the first declared icon is carried over.
                final boolean hasIcon = group.getIcons() != null && group.getIcons().value() != null && group.getIcons().value().length > 0;
                if (hasIcon) {
                    final Icon firstIcon = group.getIcons().value()[0];
                    tag.setLargeIcon(firstIcon.largeIcon());
                    tag.setSmallIcon(firstIcon.smallIcon());
                }
                tag.setInfoString(group.getDescription());
                tag.setDisplayName(group.getDisplayName());
            }
            if (tagMetaData.getVariables() != null) {
                for (final VariableMetaData variable : tagMetaData.getVariables()) {
                    final TagVariableInfo variableInfo = new TagVariableInfo();
                    variableInfo.setNameGiven(variable.getNameGiven());
                    variableInfo.setNameFromAttribute(variable.getNameFromAttribute());
                    variableInfo.setClassName(variable.getVariableClass());
                    variableInfo.setDeclare(variable.getDeclare());
                    if (variable.getScope() != null) {
                        variableInfo.setScope(variable.getScope().toString());
                    }
                    tag.addTagVariableInfo(variableInfo);
                }
            }
            if (tagMetaData.getAttributes() != null) {
                for (final AttributeMetaData attribute : tagMetaData.getAttributes()) {
                    final TagAttributeInfo attributeInfo = new TagAttributeInfo();
                    attributeInfo.setName(attribute.getName());
                    attributeInfo.setType(attribute.getType());
                    attributeInfo.setReqTime(attribute.getRtexprvalue());
                    attributeInfo.setRequired(attribute.getRequired());
                    attributeInfo.setFragment(attribute.getFragment());
                    // Deferred-value / deferred-method flags are tracked as the strings
                    // "true"/"false"; the extra detail is only set when present.
                    if (attribute.getDeferredValue() != null) {
                        attributeInfo.setDeferredValue("true");
                        attributeInfo.setExpectedTypeName(attribute.getDeferredValue().getType());
                    } else {
                        attributeInfo.setDeferredValue("false");
                    }
                    if (attribute.getDeferredMethod() != null) {
                        attributeInfo.setDeferredMethod("true");
                        attributeInfo.setMethodSignature(attribute.getDeferredMethod().getMethodSignature());
                    } else {
                        attributeInfo.setDeferredMethod("false");
                    }
                    tag.addTagAttributeInfo(attributeInfo);
                }
            }
            info.addTagInfo(tag);
        }
    }
    // Tag files.
    if (tldMetaData.getTagFiles() != null) {
        for (final TagFileMetaData tagFile : tldMetaData.getTagFiles()) {
            final TagFileInfo tagFileInfo = new TagFileInfo();
            tagFileInfo.setName(tagFile.getName());
            tagFileInfo.setPath(tagFile.getPath());
            info.addTagFileInfo(tagFileInfo);
        }
    }
    // EL functions.
    if (tldMetaData.getFunctions() != null) {
        for (final FunctionMetaData function : tldMetaData.getFunctions()) {
            final FunctionInfo functionInfo = new FunctionInfo();
            functionInfo.setName(function.getName());
            functionInfo.setFunctionClass(function.getFunctionClass());
            functionInfo.setFunctionSignature(function.getFunctionSignature());
            info.addFunctionInfo(functionInfo);
        }
    }
    // Registration: always keyed by URI (first registration wins), plus a location key
    // depending on where the TLD was found.
    if (entryPath != null) {
        // TLD packaged inside a jar under /WEB-INF/lib.
        info.setLocation(libraryLocation);
        info.setPath(entryPath);
        if (!ret.containsKey(info.getUri())) {
            ret.put(info.getUri(), info);
        }
        if ("META-INF/taglib.tld".equals(entryPath)) {
            ret.put(libraryLocation, info);
        }
    } else if (libraryLocation != null) {
        // TLD shipped directly in the web application.
        info.setLocation("");
        info.setPath(libraryLocation);
        if (!ret.containsKey(info.getUri())) {
            ret.put(info.getUri(), info);
        }
        ret.put(libraryLocation, info);
    } else {
        // No location at all: register by URI only.
        if (!ret.containsKey(info.getUri())) {
            ret.put(info.getUri(), info);
        }
    }
    return info;
}
235453.2728140wildfly
/**
 * Installs the Weld bootstrap and start services for a CDI deployment.
 *
 * Runs only for the top-level deployment unit of a Weld deployment: it collects the bean
 * deployment archives of the root module and all sub-deployments, wires their mutual
 * visibility, loads per-module Weld services, and installs the {@code WeldBootstrapService}
 * and {@code WeldStartService} with their service dependencies.
 *
 * @param phaseContext the current deployment phase context
 * @throws DeploymentUnitProcessingException on processing failure
 */
public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    final DeploymentUnit parent = Utils.getRootDeploymentUnit(deploymentUnit);
    final ServiceTarget serviceTarget = phaseContext.getServiceTarget();
    final ResourceRoot deploymentRoot = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_ROOT);
    // Not a Weld deployment: at most warn when CDI annotations are present without a bean archive.
    if (!WeldDeploymentMarker.isPartOfWeldDeployment(deploymentUnit)) {
        if (deploymentUnit.getParent() == null && CdiAnnotationMarker.cdiAnnotationsPresent(deploymentUnit)) {
            WeldLogger.DEPLOYMENT_LOGGER.cdiAnnotationsButNotBeanArchive(deploymentUnit.getName());
        }
        return;
    }
    // Service names are always rooted at the top-level (parent) deployment unit.
    final ServiceName weldBootstrapServiceName = parent.getServiceName().append(WeldBootstrapService.SERVICE_NAME);
    final ServiceName weldBootstrapServiceInternalName = parent.getServiceName().append(WeldBootstrapService.INTERNAL_SERVICE_NAME);
    ServiceName weldStartServiceName = parent.getServiceName().append(WeldStartService.SERVICE_NAME);
    deploymentUnit.addToAttachmentList(Attachments.WEB_DEPENDENCIES, weldStartServiceName);
    final Set<ServiceName> dependencies = new HashSet<ServiceName>();
    // Sub-deployments only register the WEB dependency above; the services themselves are
    // installed once, by the root deployment unit.
    if (deploymentUnit.getParent() != null) {
        return;
    }
    WeldLogger.DEPLOYMENT_LOGGER.startingServicesForCDIDeployment(phaseContext.getDeploymentUnit().getName());
    final Module module = deploymentUnit.getAttachment(Attachments.MODULE);
    final ModuleSpecification moduleSpecification = deploymentUnit.getAttachment(Attachments.MODULE_SPECIFICATION);
    // Bean deployment archives / modules indexed by module identifier, starting with the root.
    final Set<BeanDeploymentArchiveImpl> beanDeploymentArchives = new HashSet<BeanDeploymentArchiveImpl>();
    final Map<ModuleIdentifier, BeanDeploymentModule> bdmsByIdentifier = new HashMap<ModuleIdentifier, BeanDeploymentModule>();
    final Map<ModuleIdentifier, ModuleSpecification> moduleSpecByIdentifier = new HashMap<ModuleIdentifier, ModuleSpecification>();
    final Map<ModuleIdentifier, EEModuleDescriptor> eeModuleDescriptors = new HashMap<>();
    final BeanDeploymentModule rootBeanDeploymentModule = deploymentUnit.getAttachment(WeldAttachments.BEAN_DEPLOYMENT_MODULE);
    putIfValueNotNull(eeModuleDescriptors, module.getIdentifier(), rootBeanDeploymentModule.getModuleDescriptor());
    bdmsByIdentifier.put(module.getIdentifier(), rootBeanDeploymentModule);
    moduleSpecByIdentifier.put(module.getIdentifier(), moduleSpecification);
    beanDeploymentArchives.addAll(rootBeanDeploymentModule.getBeanDeploymentArchives());
    final List<DeploymentUnit> subDeployments = deploymentUnit.getAttachmentList(Attachments.SUB_DEPLOYMENTS);
    final Set<ClassLoader> subDeploymentLoaders = new HashSet<ClassLoader>();
    final ServiceLoader<DeploymentUnitDependenciesProvider> dependenciesProviders = ServiceLoader.load(DeploymentUnitDependenciesProvider.class, WildFlySecurityManager.getClassLoaderPrivileged(WeldDeploymentProcessor.class));
    // ModuleServicesProvider implementations may live in the processor's, the root module's,
    // or any sub-deployment's class loader, so look them up through a composite loader.
    List<ClassLoader> loaders = new ArrayList<>(subDeployments.size() + 2);
    loaders.add(WildFlySecurityManager.getClassLoaderPrivileged(WeldDeploymentProcessor.class));
    loaders.add(module.getClassLoader());
    for (DeploymentUnit subDeployment : subDeployments) {
        loaders.add(subDeployment.getAttachment(Attachments.MODULE).getClassLoader());
    }
    Iterable<ModuleServicesProvider> moduleServicesProviders = ServiceLoader.load(ModuleServicesProvider.class, new CompositeClassLoader(loaders));
    getDependencies(deploymentUnit, dependencies, dependenciesProviders);
    // Collect archives, specs and per-module services from every sub-deployment; entries with
    // no module or no bean deployment module attachment are skipped.
    for (DeploymentUnit subDeployment : subDeployments) {
        getDependencies(subDeployment, dependencies, dependenciesProviders);
        final Module subDeploymentModule = subDeployment.getAttachment(Attachments.MODULE);
        if (subDeploymentModule == null) {
            continue;
        }
        subDeploymentLoaders.add(subDeploymentModule.getClassLoader());
        final ModuleSpecification subDeploymentModuleSpec = subDeployment.getAttachment(Attachments.MODULE_SPECIFICATION);
        final BeanDeploymentModule bdm = subDeployment.getAttachment(WeldAttachments.BEAN_DEPLOYMENT_MODULE);
        if (bdm == null) {
            continue;
        }
        beanDeploymentArchives.addAll(bdm.getBeanDeploymentArchives());
        bdmsByIdentifier.put(subDeploymentModule.getIdentifier(), bdm);
        moduleSpecByIdentifier.put(subDeploymentModule.getIdentifier(), subDeploymentModuleSpec);
        putIfValueNotNull(eeModuleDescriptors, subDeploymentModule.getIdentifier(), bdm.getModuleDescriptor());
        final ResourceRoot subDeploymentRoot = subDeployment.getAttachment(Attachments.DEPLOYMENT_ROOT);
        for (Entry<Class<? extends Service>, Service> entry : ServiceLoaders.loadModuleServices(moduleServicesProviders, deploymentUnit, subDeployment, subDeploymentModule, subDeploymentRoot).entrySet()) {
            bdm.addService(entry.getKey(), Reflections.cast(entry.getValue()));
        }
    }
    // Make bean deployment modules visible to each other along module system dependencies.
    // The root module is skipped here; its visibility is presumably handled elsewhere —
    // NOTE(review): confirm against the rest of WeldDeploymentProcessor.
    for (Map.Entry<ModuleIdentifier, BeanDeploymentModule> entry : bdmsByIdentifier.entrySet()) {
        final ModuleSpecification bdmSpec = moduleSpecByIdentifier.get(entry.getKey());
        final BeanDeploymentModule bdm = entry.getValue();
        if (bdm == rootBeanDeploymentModule) {
            continue;
        }
        for (ModuleDependency dependency : bdmSpec.getSystemDependenciesSet()) {
            BeanDeploymentModule other = bdmsByIdentifier.get(dependency.getIdentifier());
            if (other != null && other != bdm) {
                bdm.addBeanDeploymentModule(other);
            }
        }
    }
    // Services for the root module itself, shared with any additional bean deployment archives.
    Map<Class<? extends Service>, Service> rootModuleServices = ServiceLoaders.loadModuleServices(moduleServicesProviders, deploymentUnit, deploymentUnit, module, deploymentRoot);
    for (Entry<Class<? extends Service>, Service> entry : rootModuleServices.entrySet()) {
        rootBeanDeploymentModule.addService(entry.getKey(), Reflections.cast(entry.getValue()));
    }
    for (final BeanDeploymentArchiveImpl additional : deploymentUnit.getAttachmentList(WeldAttachments.ADDITIONAL_BEAN_DEPLOYMENT_MODULES)) {
        beanDeploymentArchives.add(additional);
        for (Entry<Class<? extends Service>, Service> entry : rootModuleServices.entrySet()) {
            additional.getServices().add(entry.getKey(), Reflections.cast(entry.getValue()));
        }
    }
    final Collection<Metadata<Extension>> extensions = WeldPortableExtensions.getPortableExtensions(deploymentUnit).getExtensions();
    final WeldDeployment deployment = new WeldDeployment(beanDeploymentArchives, extensions, module, subDeploymentLoaders, deploymentUnit, rootBeanDeploymentModule, eeModuleDescriptors);
    installBootstrapConfigurationService(deployment, parent);
    // Build the bootstrap service with its executor/security/transaction suppliers.
    final ServiceBuilder<?> weldBootstrapServiceBuilder = serviceTarget.addService(weldBootstrapServiceInternalName);
    final Consumer<WeldBootstrapService> weldBootstrapServiceConsumer = weldBootstrapServiceBuilder.provides(weldBootstrapServiceInternalName);
    weldBootstrapServiceBuilder.requires(TCCLSingletonService.SERVICE_NAME);
    final Supplier<ExecutorServices> executorServicesSupplier = weldBootstrapServiceBuilder.requires(WeldExecutorServices.SERVICE_NAME);
    final Supplier<ExecutorService> serverExecutorSupplier = weldBootstrapServiceBuilder.requires(Services.JBOSS_SERVER_EXECUTOR);
    Supplier<SecurityServices> securityServicesSupplier = null;
    Supplier<TransactionServices> weldTransactionServicesSupplier = null;
    // Optional bootstrap dependencies (security/transactions) are discovered via ServiceLoader;
    // installers may return null when they have nothing to install.
    final ServiceLoader<BootstrapDependencyInstaller> installers = ServiceLoader.load(BootstrapDependencyInstaller.class, WildFlySecurityManager.getClassLoaderPrivileged(WeldDeploymentProcessor.class));
    for (BootstrapDependencyInstaller installer : installers) {
        ServiceName serviceName = installer.install(serviceTarget, deploymentUnit, jtsEnabled);
        if (serviceName == null) {
            continue;
        }
        // Match on the simple name to distinguish the security and transaction services.
        if (ServiceNames.WELD_SECURITY_SERVICES_SERVICE_NAME.getSimpleName().equals(serviceName.getSimpleName())) {
            securityServicesSupplier = weldBootstrapServiceBuilder.requires(serviceName);
        } else if (ServiceNames.WELD_TRANSACTION_SERVICES_SERVICE_NAME.getSimpleName().equals(serviceName.getSimpleName())) {
            weldTransactionServicesSupplier = weldBootstrapServiceBuilder.requires(serviceName);
        }
    }
    ServiceName deploymentServiceName = Utils.getRootDeploymentUnit(deploymentUnit).getServiceName();
    final WeldBootstrapService weldBootstrapService = new WeldBootstrapService(deployment, WildFlyWeldEnvironment.INSTANCE, deploymentUnit.getName(), weldBootstrapServiceConsumer, executorServicesSupplier, serverExecutorSupplier, securityServicesSupplier, weldTransactionServicesSupplier, deploymentServiceName, weldBootstrapServiceName);
    for (Entry<Class<? extends Service>, Service> entry : rootModuleServices.entrySet()) {
        weldBootstrapService.addWeldService(entry.getKey(), Reflections.cast(entry.getValue()));
    }
    weldBootstrapServiceBuilder.setInstance(weldBootstrapService);
    weldBootstrapServiceBuilder.install();
    // The start service waits for JNDI bindings (and, unless the EAR is initialize-in-order,
    // the JNDI bindings of every sub-deployment) before bootstrapping Weld.
    final List<SetupAction> setupActions = getSetupActions(deploymentUnit);
    ServiceBuilder<?> startService = serviceTarget.addService(weldStartServiceName);
    for (final ServiceName dependency : dependencies) {
        startService.requires(dependency);
    }
    startService.requires(JndiNamingDependencyProcessor.serviceName(deploymentUnit));
    final CapabilityServiceSupport capabilities = deploymentUnit.getAttachment(Attachments.CAPABILITY_SERVICE_SUPPORT);
    boolean tx = capabilities.hasCapability("org.wildfly.transactions");
    for (final ServiceName jndiSubsystemDependency : getJNDISubsytemDependencies(tx)) {
        startService.requires(jndiSubsystemDependency);
    }
    final EarMetaData earConfig = deploymentUnit.getAttachment(org.jboss.as.ee.structure.Attachments.EAR_METADATA);
    if (earConfig == null || !earConfig.getInitializeInOrder()) {
        for (DeploymentUnit sub : subDeployments) {
            startService.requires(JndiNamingDependencyProcessor.serviceName(sub));
        }
    }
    final Supplier<WeldBootstrapService> bootstrapSupplier = startService.requires(weldBootstrapServiceName);
    startService.setInstance(new WeldStartService(bootstrapSupplier, setupActions, module.getClassLoader(), deploymentServiceName));
    startService.install();
}
247547.451171cassandra
/**
 * Exercises CQL arithmetic operators (+, -, *, /, %) mixing columns with literals in both
 * WHERE clauses and selection lists, across all numeric types, including result-column
 * naming, type promotion, and the error cases for ambiguous bind markers.
 *
 * Note: queries run through the test harness's format step, so "%%" in a query string
 * denotes a literal '%' in the executed CQL (the expected column names such as "a % 1"
 * confirm this).
 */
public void testSingleOperationsWithLiterals() throws Throwable {
    createTable("CREATE TABLE %s (pk int, c1 tinyint, c2 smallint, v text, PRIMARY KEY(pk, c1, c2))");
    // NOTE(review): "%S" is the uppercase-string format conversion; "%s" was presumably
    // intended — confirm the formatted (uppercased) identifier still resolves, since
    // unquoted CQL identifiers are case-insensitive.
    execute("INSERT INTO %S (pk, c1, c2, v) VALUES (2, 2, 2, 'test')");
    // Literal arithmetic against partition/clustering columns; result types follow the column.
    assertRows(execute("SELECT * FROM %s WHERE pk = 2 AND c1 = 1 + 1"), row(2, (byte) 2, (short) 2, "test"));
    // A bind marker must match the column's type exactly (int 1 is 4 bytes, tinyint needs 1).
    assertInvalidMessage("Expected 1 byte for a tinyint (4)", "SELECT * FROM %s WHERE pk = 2 AND c1 = 1 + ?", 1);
    assertRows(execute("SELECT * FROM %s WHERE pk = 2 AND c1 = 1 + ?", (byte) 1), row(2, (byte) 2, (short) 2, "test"));
    assertRows(execute("SELECT * FROM %s WHERE pk = 1 + 1 AND c1 = 2"), row(2, (byte) 2, (short) 2, "test"));
    assertRows(execute("SELECT * FROM %s WHERE pk = 2 AND c1 = 2 AND c2 = 1 + 1"), row(2, (byte) 2, (short) 2, "test"));
    assertRows(execute("SELECT * FROM %s WHERE pk = 2 AND c1 = 2 AND c2 = 1 * (1 + 1)"), row(2, (byte) 2, (short) 2, "test"));
    // Untyped bind markers in arithmetic are ambiguous and require an explicit type hint.
    assertInvalidMessage("Ambiguous '+' operation with args ? and 1: use type hint to disambiguate, example '(int) ?'", "SELECT * FROM %s WHERE pk = ? + 1 AND c1 = 2", 1);
    assertInvalidMessage("Ambiguous '+' operation with args ? and 1: use type hint to disambiguate, example '(int) ?'", "SELECT * FROM %s WHERE pk = 2 AND c1 = 2 AND c2 = 1 * (? + 1)", 1);
    // Literal arithmetic in the selection list.
    assertRows(execute("SELECT 1 + 1, v FROM %s WHERE pk = 2 AND c1 = 2"), row(2, "test"));
    assertInvalidMessage("Ambiguous '+' operation with args 1 and ?: use type hint to disambiguate, example '(int) ?'", "SELECT 1 + ?, v FROM %s WHERE pk = 2 AND c1 = 2", 1);
    assertRows(execute("SELECT 100 + 50, v FROM %s WHERE pk = 2 AND c1 = 2"), row(150, "test"));
    assertInvalidMessage("Ambiguous '+' operation with args ? and 50: use type hint to disambiguate, example '(int) ?'", "SELECT ? + 50, v FROM %s WHERE pk = 2 AND c1 = 2", 100);
    // One column per numeric type to check operator result types and column naming.
    createTable("CREATE TABLE %s (a tinyint, b smallint, c int, d bigint, e float, f double, g varint, h decimal, PRIMARY KEY(a, b))" + " WITH CLUSTERING ORDER BY (b DESC)");
    execute("INSERT INTO %S (a, b, c, d, e, f, g, h) VALUES (1, 2, 3, 4, 5.5, 6.5, 7, 8.5)");
    // Addition: result columns are named after the expression; narrow integer types promote to int.
    assertColumnNames(execute("SELECT a + 1, b + 1, c + 1, d + 1, e + 1, f + 1, g + 1, h + 1 FROM %s WHERE a = 1 AND b = 2"), "a + 1", "b + 1", "c + 1", "d + 1", "e + 1", "f + 1", "g + 1", "h + 1");
    assertRows(execute("SELECT a + 1, b + 1, c + 1, d + 1, e + 1, f + 1, g + 1, h + 1 FROM %s WHERE a = 1 AND b = 2"), row(2, 3, 4, 5L, 6.5F, 7.5, BigInteger.valueOf(8), BigDecimal.valueOf(9.5)));
    assertRows(execute("SELECT 2 + a, 2 + b, 2 + c, 2 + d, 2 + e, 2 + f, 2 + g, 2 + h FROM %s WHERE a = 1 AND b = 2"), row(3, 4, 5, 6L, 7.5F, 8.5, BigInteger.valueOf(9), BigDecimal.valueOf(10.5)));
    // Literals wider than int promote the whole expression to bigint/decimal as appropriate.
    long bigInt = Integer.MAX_VALUE + 10L;
    assertRows(execute("SELECT a + " + bigInt + "," + " b + " + bigInt + "," + " c + " + bigInt + "," + " d + " + bigInt + "," + " e + " + bigInt + "," + " f + " + bigInt + "," + " g + " + bigInt + "," + " h + " + bigInt + " FROM %s WHERE a = 1 AND b = 2"), row(1L + bigInt, 2L + bigInt, 3L + bigInt, 4L + bigInt, 5.5 + bigInt, 6.5 + bigInt, BigInteger.valueOf(bigInt + 7), BigDecimal.valueOf(bigInt + 8.5)));
    assertRows(execute("SELECT a + 5.5, b + 5.5, c + 5.5, d + 5.5, e + 5.5, f + 5.5, g + 5.5, h + 5.5 FROM %s WHERE a = 1 AND b = 2"), row(6.5, 7.5, 8.5, 9.5, 11.0, 12.0, BigDecimal.valueOf(12.5), BigDecimal.valueOf(14.0)));
    assertRows(execute("SELECT a + 6.5, b + 6.5, c + 6.5, d + 6.5, e + 6.5, f + 6.5, g + 6.5, h + 6.5 FROM %s WHERE a = 1 AND b = 2"), row(7.5, 8.5, 9.5, 10.5, 12.0, 13.0, BigDecimal.valueOf(13.5), BigDecimal.valueOf(15.0)));
    // Subtraction.
    assertColumnNames(execute("SELECT a - 1, b - 1, c - 1, d - 1, e - 1, f - 1, g - 1, h - 1 FROM %s WHERE a = 1 AND b = 2"), "a - 1", "b - 1", "c - 1", "d - 1", "e - 1", "f - 1", "g - 1", "h - 1");
    assertRows(execute("SELECT a - 1, b - 1, c - 1, d - 1, e - 1, f - 1, g - 1, h - 1 FROM %s WHERE a = 1 AND b = 2"), row(0, 1, 2, 3L, 4.5F, 5.5, BigInteger.valueOf(6), BigDecimal.valueOf(7.5)));
    assertRows(execute("SELECT a - 2, b - 2, c - 2, d - 2, e - 2, f - 2, g - 2, h - 2 FROM %s WHERE a = 1 AND b = 2"), row(-1, 0, 1, 2L, 3.5F, 4.5, BigInteger.valueOf(5), BigDecimal.valueOf(6.5)));
    assertRows(execute("SELECT a - 3, b - 3, 3 - 3, d - 3, e - 3, f - 3, g - 3, h - 3 FROM %s WHERE a = 1 AND b = 2"), row(-2, -1, 0, 1L, 2.5F, 3.5, BigInteger.valueOf(4), BigDecimal.valueOf(5.5)));
    assertRows(execute("SELECT a - " + bigInt + "," + " b - " + bigInt + "," + " c - " + bigInt + "," + " d - " + bigInt + "," + " e - " + bigInt + "," + " f - " + bigInt + "," + " g - " + bigInt + "," + " h - " + bigInt + " FROM %s WHERE a = 1 AND b = 2"), row(1L - bigInt, 2L - bigInt, 3L - bigInt, 4L - bigInt, 5.5 - bigInt, 6.5 - bigInt, BigInteger.valueOf(7 - bigInt), BigDecimal.valueOf(8.5 - bigInt)));
    assertRows(execute("SELECT a - 5.5, b - 5.5, c - 5.5, d - 5.5, e - 5.5, f - 5.5, g - 5.5, h - 5.5 FROM %s WHERE a = 1 AND b = 2"), row(-4.5, -3.5, -2.5, -1.5, 0.0, 1.0, BigDecimal.valueOf(1.5), BigDecimal.valueOf(3.0)));
    assertRows(execute("SELECT a - 6.5, b - 6.5, c - 6.5, d - 6.5, e - 6.5, f - 6.5, g - 6.5, h - 6.5 FROM %s WHERE a = 1 AND b = 2"), row(-5.5, -4.5, -3.5, -2.5, -1.0, 0.0, BigDecimal.valueOf(0.5), BigDecimal.valueOf(2.0)));
    // Multiplication: note decimal results keep the scale of the operands (e.g. "8.50").
    assertColumnNames(execute("SELECT a * 1, b * 1, c * 1, d * 1, e * 1, f * 1, g * 1, h * 1 FROM %s WHERE a = 1 AND b = 2"), "a * 1", "b * 1", "c * 1", "d * 1", "e * 1", "f * 1", "g * 1", "h * 1");
    assertRows(execute("SELECT a * 1, b * 1, c * 1, d * 1, e * 1, f * 1, g * 1, h * 1 FROM %s WHERE a = 1 AND b = 2"), row(1, 2, 3, 4L, 5.5F, 6.5, BigInteger.valueOf(7), new BigDecimal("8.50")));
    assertRows(execute("SELECT a * 2, b * 2, c * 2, d * 2, e * 2, f * 2, g * 2, h * 2 FROM %s WHERE a = 1 AND b = 2"), row(2, 4, 6, 8L, 11.0F, 13.0, BigInteger.valueOf(14), new BigDecimal("17.00")));
    assertRows(execute("SELECT a * 3, b * 3, c * 3, d * 3, e * 3, f * 3, g * 3, h * 3 FROM %s WHERE a = 1 AND b = 2"), row(3, 6, 9, 12L, 16.5F, 19.5, BigInteger.valueOf(21), new BigDecimal("25.50")));
    assertRows(execute("SELECT a * " + bigInt + "," + " b * " + bigInt + "," + " c * " + bigInt + "," + " d * " + bigInt + "," + " e * " + bigInt + "," + " f * " + bigInt + "," + " g * " + bigInt + "," + " h * " + bigInt + " FROM %s WHERE a = 1 AND b = 2"), row(1L * bigInt, 2L * bigInt, 3L * bigInt, 4L * bigInt, 5.5 * bigInt, 6.5 * bigInt, BigInteger.valueOf(7 * bigInt), BigDecimal.valueOf(8.5 * bigInt)));
    assertRows(execute("SELECT a * 5.5, b * 5.5, c * 5.5, d * 5.5, e * 5.5, f * 5.5, g * 5.5, h * 5.5 FROM %s WHERE a = 1 AND b = 2"), row(5.5, 11.0, 16.5, 22.0, 30.25, 35.75, new BigDecimal("38.5"), new BigDecimal("46.75")));
    assertRows(execute("SELECT a * 6.5, b * 6.5, c * 6.5, d * 6.5, e * 6.5, 6.5 * f, g * 6.5, h * 6.5 FROM %s WHERE a = 1 AND b = 2"), row(6.5, 13.0, 19.5, 26.0, 35.75, 42.25, new BigDecimal("45.5"), BigDecimal.valueOf(55.25)));
    // Division: integer types truncate; decimal division uses extended precision.
    assertColumnNames(execute("SELECT a / 1, b / 1, c / 1, d / 1, e / 1, f / 1, g / 1, h / 1 FROM %s WHERE a = 1 AND b = 2"), "a / 1", "b / 1", "c / 1", "d / 1", "e / 1", "f / 1", "g / 1", "h / 1");
    assertRows(execute("SELECT a / 1, b / 1, c / 1, d / 1, e / 1, f / 1, g / 1, h / 1 FROM %s WHERE a = 1 AND b = 2"), row(1, 2, 3, 4L, 5.5F, 6.5, BigInteger.valueOf(7), new BigDecimal("8.5")));
    assertRows(execute("SELECT a / 2, b / 2, c / 2, d / 2, e / 2, f / 2, g / 2, h / 2 FROM %s WHERE a = 1 AND b = 2"), row(0, 1, 1, 2L, 2.75F, 3.25, BigInteger.valueOf(3), new BigDecimal("4.25")));
    assertRows(execute("SELECT a / 3, b / 3, c / 3, d / 3, e / 3, f / 3, g / 3, h / 3 FROM %s WHERE a = 1 AND b = 2"), row(0, 0, 1, 1L, 1.8333334F, 2.1666666666666665, BigInteger.valueOf(2), new BigDecimal("2.83333333333333333333333333333333")));
    assertRows(execute("SELECT a / " + bigInt + "," + " b / " + bigInt + "," + " c / " + bigInt + "," + " d / " + bigInt + "," + " e / " + bigInt + "," + " f / " + bigInt + "," + " g / " + bigInt + " FROM %s WHERE a = 1 AND b = 2"), row(1L / bigInt, 2L / bigInt, 3L / bigInt, 4L / bigInt, 5.5 / bigInt, 6.5 / bigInt, BigInteger.valueOf(7).divide(BigInteger.valueOf(bigInt))));
    assertRows(execute("SELECT a / 5.5, b / 5.5, c / 5.5, d / 5.5, e / 5.5, f / 5.5, g / 5.5, h / 5.5 FROM %s WHERE a = 1 AND b = 2"), row(0.18181818181818182, 0.36363636363636365, 0.5454545454545454, 0.7272727272727273, 1.0, 1.1818181818181819, new BigDecimal("1.27272727272727272727272727272727"), new BigDecimal("1.54545454545454545454545454545455")));
    assertRows(execute("SELECT a / 6.5, b / 6.5, c / 6.5, d / 6.5, e / 6.5, f / 6.5, g / 6.5, h / 6.5 FROM %s WHERE a = 1 AND b = 2"), row(0.15384615384615385, 0.3076923076923077, 0.46153846153846156, 0.6153846153846154, 0.8461538461538461, 1.0, new BigDecimal("1.07692307692307692307692307692308"), new BigDecimal("1.30769230769230769230769230769231")));
    // Modulo ("%%" in the source string is a single '%' in the executed query).
    assertColumnNames(execute("SELECT a %% 1, b %% 1, c %% 1, d %% 1, e %% 1, f %% 1, g %% 1, h %% 1 FROM %s WHERE a = 1 AND b = 2"), "a % 1", "b % 1", "c % 1", "d % 1", "e % 1", "f % 1", "g % 1", "h % 1");
    assertRows(execute("SELECT a %% 1, b %% 1, c %% 1, d %% 1, e %% 1, f %% 1, g %% 1, h %% 1 FROM %s WHERE a = 1 AND b = 2"), row(0, 0, 0, 0L, 0.5F, 0.5, BigInteger.valueOf(0), new BigDecimal("0.5")));
    assertRows(execute("SELECT a %% 2, b %% 2, c %% 2, d %% 2, e %% 2, f %% 2, g %% 2, h %% 2 FROM %s WHERE a = 1 AND b = 2"), row(1, 0, 1, 0L, 1.5F, 0.5, BigInteger.valueOf(1), new BigDecimal("0.5")));
    assertRows(execute("SELECT a %% 3, b %% 3, c %% 3, d %% 3, e %% 3, f %% 3, g %% 3, h %% 3 FROM %s WHERE a = 1 AND b = 2"), row(1, 2, 0, 1L, 2.5F, 0.5, BigInteger.valueOf(1), new BigDecimal("2.5")));
    assertRows(execute("SELECT a %% " + bigInt + "," + " b %% " + bigInt + "," + " c %% " + bigInt + "," + " d %% " + bigInt + "," + " e %% " + bigInt + "," + " f %% " + bigInt + "," + " g %% " + bigInt + "," + " h %% " + bigInt + " FROM %s WHERE a = 1 AND b = 2"), row(1L % bigInt, 2L % bigInt, 3L % bigInt, 4L % bigInt, 5.5 % bigInt, 6.5 % bigInt, BigInteger.valueOf(7 % bigInt), BigDecimal.valueOf(8.5 % bigInt)));
    assertRows(execute("SELECT a %% 5.5, b %% 5.5, c %% 5.5, d %% 5.5, e %% 5.5, f %% 5.5, g %% 5.5, h %% 5.5 FROM %s WHERE a = 1 AND b = 2"), row(1.0, 2.0, 3.0, 4.0, 0.0, 1.0, new BigDecimal("1.5"), new BigDecimal("3.0")));
    assertRows(execute("SELECT a %% 6.5, b %% 6.5, c %% 6.5, d %% 6.5, e %% 6.5, f %% 6.5, g %% 6.5, h %% 6.5 FROM %s WHERE a = 1 AND b = 2"), row(1.0, 2.0, 3.0, 4.0, 5.5, 0.0, new BigDecimal("0.5"), new BigDecimal("2.0")));
    // Pure literal expressions and a type-hinted negative literal alongside column selections.
    assertRows(execute("SELECT a, b, 1 + 1, 2 - 1, 2 * 2, 2 / 1 , 2 %% 1, (int) -1 FROM %s WHERE a = 1 AND b = 2"), row((byte) 1, (short) 2, 2, 1, 4, 2, 0, -1));
}
246122.61171cassandra
public void testToJsonFct() throws Throwable {
    String typeName = createType("CREATE TYPE %s (a int, b uuid, c set<text>)");
    createTable("CREATE TABLE %s (" + "k int PRIMARY KEY, " + "asciival ascii, " + "bigintval bigint, " + "blobval blob, " + "booleanval boolean, " + "dateval date, " + "decimalval decimal, " + "doubleval double, " + "floatval float, " + "inetval inet, " + "intval int, " + "smallintval smallint, " + "textval text, " + "timeval time, " + "timestampval timestamp, " + "timeuuidval timeuuid, " + "tinyintval tinyint, " + "uuidval uuid," + "varcharval varchar, " + "varintval varint, " + "listval list<int>, " + "frozenlistval frozen<list<int>>, " + "setval set<uuid>, " + "frozensetval frozen<set<uuid>>, " + "mapval map<ascii, int>, " + "frozenmapval frozen<map<ascii, int>>, " + "tupleval frozen<tuple<int, ascii, uuid>>," + "udtval frozen<" + typeName + ">," + "durationval duration)");
    execute("INSERT INTO %s (k, textval) VALUES (?, to_json(1234))", 0);
    assertRows(execute("SELECT textval FROM %s WHERE k = ?", 0), row("1234"));
    assertRows(execute("SELECT textval FROM %s WHERE textval = to_json(1234) ALLOW FILTERING"), row("1234"));
    execute("UPDATE %s SET textval = to_json(-1234) WHERE k = ?", 0);
    assertRows(execute("SELECT textval FROM %s WHERE k = ?", 0), row("-1234"));
    assertRows(execute("SELECT textval FROM %s WHERE textval = to_json(-1234) ALLOW FILTERING"), row("-1234"));
    execute("DELETE FROM %s WHERE k = from_json(to_json(0))");
    assertEmpty(execute("SELECT textval FROM %s WHERE k = ?", 0));
    execute("INSERT INTO %s (k, textval) VALUES (?, to_json((int) ?))", 0, 123123);
    assertRows(execute("SELECT textval FROM %s WHERE k = ?", 0), row("123123"));
    assertRows(execute("SELECT textval FROM %s WHERE textval = to_json((int) ?) ALLOW FILTERING", 123123), row("123123"));
    execute("UPDATE %s SET textval = to_json((int) ?) WHERE k = ?", -123123, 0);
    assertRows(execute("SELECT textval FROM %s WHERE k = ?", 0), row("-123123"));
    assertRows(execute("SELECT textval FROM %s WHERE textval = to_json((int) ?) ALLOW FILTERING", -123123), row("-123123"));
    execute("DELETE FROM %s WHERE k = from_json(to_json((int) ?))", 0);
    assertEmpty(execute("SELECT textval FROM %s WHERE k = ?", 0));
    execute("INSERT INTO %s (k, asciival) VALUES (?, ?)", 0, "ascii text");
    assertRows(execute("SELECT k, to_json(asciival) FROM %s WHERE k = ?", 0), row(0, "\"ascii text\""));
    execute("INSERT INTO %s (k, asciival) VALUES (?, ?)", 0, "");
    assertRows(execute("SELECT k, to_json(asciival) FROM %s WHERE k = ?", 0), row(0, "\"\""));
    execute("INSERT INTO %s (k, bigintval) VALUES (?, ?)", 0, 123123123123L);
    assertRows(execute("SELECT k, to_json(bigintval) FROM %s WHERE k = ?", 0), row(0, "123123123123"));
    execute("INSERT INTO %s (k, bigintval) VALUES (?, ?)", 0, 0L);
    assertRows(execute("SELECT k, to_json(bigintval) FROM %s WHERE k = ?", 0), row(0, "0"));
    execute("INSERT INTO %s (k, bigintval) VALUES (?, ?)", 0, -123123123123L);
    assertRows(execute("SELECT k, to_json(bigintval) FROM %s WHERE k = ?", 0), row(0, "-123123123123"));
    execute("INSERT INTO %s (k, blobval) VALUES (?, ?)", 0, ByteBufferUtil.bytes(1));
    assertRows(execute("SELECT k, to_json(blobval) FROM %s WHERE k = ?", 0), row(0, "\"0x00000001\""));
    execute("INSERT INTO %s (k, blobval) VALUES (?, ?)", 0, ByteBufferUtil.EMPTY_BYTE_BUFFER);
    assertRows(execute("SELECT k, to_json(blobval) FROM %s WHERE k = ?", 0), row(0, "\"0x\""));
    execute("INSERT INTO %s (k, booleanval) VALUES (?, ?)", 0, true);
    assertRows(execute("SELECT k, to_json(booleanval) FROM %s WHERE k = ?", 0), row(0, "true"));
    execute("INSERT INTO %s (k, booleanval) VALUES (?, ?)", 0, false);
    assertRows(execute("SELECT k, to_json(booleanval) FROM %s WHERE k = ?", 0), row(0, "false"));
    execute("INSERT INTO %s (k, dateval) VALUES (?, ?)", 0, SimpleDateSerializer.dateStringToDays("1987-03-23"));
    assertRows(execute("SELECT k, to_json(dateval) FROM %s WHERE k = ?", 0), row(0, "\"1987-03-23\""));
    execute("INSERT INTO %s (k, decimalval) VALUES (?, ?)", 0, new BigDecimal("123123.123123"));
    assertRows(execute("SELECT k, to_json(decimalval) FROM %s WHERE k = ?", 0), row(0, "123123.123123"));
    execute("INSERT INTO %s (k, decimalval) VALUES (?, ?)", 0, new BigDecimal("-1.23E-12"));
    assertRows(execute("SELECT k, to_json(decimalval) FROM %s WHERE k = ?", 0), row(0, "-1.23E-12"));
    execute("INSERT INTO %s (k, doubleval) VALUES (?, ?)", 0, 123123.123123d);
    assertRows(execute("SELECT k, to_json(doubleval) FROM %s WHERE k = ?", 0), row(0, "123123.123123"));
    execute("INSERT INTO %s (k, doubleval) VALUES (?, ?)", 0, 123123d);
    assertRows(execute("SELECT k, to_json(doubleval) FROM %s WHERE k = ?", 0), row(0, "123123.0"));
    execute("INSERT INTO %s (k, floatval) VALUES (?, ?)", 0, 123.123f);
    assertRows(execute("SELECT k, to_json(floatval) FROM %s WHERE k = ?", 0), row(0, "123.123"));
    execute("INSERT INTO %s (k, floatval) VALUES (?, ?)", 0, 123123f);
    assertRows(execute("SELECT k, to_json(floatval) FROM %s WHERE k = ?", 0), row(0, "123123.0"));
    execute("INSERT INTO %s (k, inetval) VALUES (?, ?)", 0, InetAddress.getByName("127.0.0.1"));
    assertRows(execute("SELECT k, to_json(inetval) FROM %s WHERE k = ?", 0), row(0, "\"127.0.0.1\""));
    execute("INSERT INTO %s (k, inetval) VALUES (?, ?)", 0, InetAddress.getByName("::1"));
    assertRows(execute("SELECT k, to_json(inetval) FROM %s WHERE k = ?", 0), row(0, "\"0:0:0:0:0:0:0:1\""));
    execute("INSERT INTO %s (k, intval) VALUES (?, ?)", 0, 123123);
    assertRows(execute("SELECT k, to_json(intval) FROM %s WHERE k = ?", 0), row(0, "123123"));
    execute("INSERT INTO %s (k, intval) VALUES (?, ?)", 0, 0);
    assertRows(execute("SELECT k, to_json(intval) FROM %s WHERE k = ?", 0), row(0, "0"));
    execute("INSERT INTO %s (k, intval) VALUES (?, ?)", 0, -123123);
    assertRows(execute("SELECT k, to_json(intval) FROM %s WHERE k = ?", 0), row(0, "-123123"));
    execute("INSERT INTO %s (k, smallintval) VALUES (?, ?)", 0, (short) 32767);
    assertRows(execute("SELECT k, to_json(smallintval) FROM %s WHERE k = ?", 0), row(0, "32767"));
    execute("INSERT INTO %s (k, smallintval) VALUES (?, ?)", 0, (short) 0);
    assertRows(execute("SELECT k, to_json(smallintval) FROM %s WHERE k = ?", 0), row(0, "0"));
    execute("INSERT INTO %s (k, smallintval) VALUES (?, ?)", 0, (short) -32768);
    assertRows(execute("SELECT k, to_json(smallintval) FROM %s WHERE k = ?", 0), row(0, "-32768"));
    execute("INSERT INTO %s (k, tinyintval) VALUES (?, ?)", 0, (byte) 127);
    assertRows(execute("SELECT k, to_json(tinyintval) FROM %s WHERE k = ?", 0), row(0, "127"));
    execute("INSERT INTO %s (k, tinyintval) VALUES (?, ?)", 0, (byte) 0);
    assertRows(execute("SELECT k, to_json(tinyintval) FROM %s WHERE k = ?", 0), row(0, "0"));
    execute("INSERT INTO %s (k, tinyintval) VALUES (?, ?)", 0, (byte) -128);
    assertRows(execute("SELECT k, to_json(tinyintval) FROM %s WHERE k = ?", 0), row(0, "-128"));
    execute("INSERT INTO %s (k, textval) VALUES (?, ?)", 0, "");
    assertRows(execute("SELECT k, to_json(textval) FROM %s WHERE k = ?", 0), row(0, "\"\""));
    execute("INSERT INTO %s (k, textval) VALUES (?, ?)", 0, "abcd");
    assertRows(execute("SELECT k, to_json(textval) FROM %s WHERE k = ?", 0), row(0, "\"abcd\""));
    execute("INSERT INTO %s (k, textval) VALUES (?, ?)", 0, "\u8422");
    assertRows(execute("SELECT k, to_json(textval) FROM %s WHERE k = ?", 0), row(0, "\"\u8422\""));
    execute("INSERT INTO %s (k, textval) VALUES (?, ?)", 0, "\u0000");
    assertRows(execute("SELECT k, to_json(textval) FROM %s WHERE k = ?", 0), row(0, "\"\\u0000\""));
    execute("INSERT INTO %s (k, timeval) VALUES (?, ?)", 0, 123L);
    assertRows(execute("SELECT k, to_json(timeval) FROM %s WHERE k = ?", 0), row(0, "\"00:00:00.000000123\""));
    execute("INSERT INTO %s (k, timeval) VALUES (?, from_json(?))", 0, "\"07:35:07.000111222\"");
    assertRows(execute("SELECT k, to_json(timeval) FROM %s WHERE k = ?", 0), row(0, "\"07:35:07.000111222\""));
    SimpleDateFormat sdf = new SimpleDateFormat("y-M-d");
    sdf.setTimeZone(TimeZone.getTimeZone("UDT"));
    execute("INSERT INTO %s (k, timestampval) VALUES (?, ?)", 0, sdf.parse("2014-01-01"));
    assertRows(execute("SELECT k, to_json(timestampval) FROM %s WHERE k = ?", 0), row(0, "\"2014-01-01 00:00:00.000Z\""));
    execute("INSERT INTO %s (k, timeuuidval) VALUES (?, ?)", 0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"));
    assertRows(execute("SELECT k, to_json(timeuuidval) FROM %s WHERE k = ?", 0), row(0, "\"6bddc89a-5644-11e4-97fc-56847afe9799\""));
    execute("INSERT INTO %s (k, uuidval) VALUES (?, ?)", 0, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"));
    assertRows(execute("SELECT k, to_json(uuidval) FROM %s WHERE k = ?", 0), row(0, "\"6bddc89a-5644-11e4-97fc-56847afe9799\""));
    execute("INSERT INTO %s (k, varintval) VALUES (?, ?)", 0, new BigInteger("123123123123123123123"));
    assertRows(execute("SELECT k, to_json(varintval) FROM %s WHERE k = ?", 0), row(0, "123123123123123123123"));
    execute("INSERT INTO %s (k, listval) VALUES (?, ?)", 0, list(1, 2, 3));
    assertRows(execute("SELECT k, to_json(listval) FROM %s WHERE k = ?", 0), row(0, "[1, 2, 3]"));
    execute("INSERT INTO %s (k, listval) VALUES (?, ?)", 0, list());
    assertRows(execute("SELECT k, to_json(listval) FROM %s WHERE k = ?", 0), row(0, "null"));
    execute("INSERT INTO %s (k, frozenlistval) VALUES (?, ?)", 0, list(1, 2, 3));
    assertRows(execute("SELECT k, to_json(frozenlistval) FROM %s WHERE k = ?", 0), row(0, "[1, 2, 3]"));
    execute("INSERT INTO %s (k, setval) VALUES (?, ?)", 0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))));
    assertRows(execute("SELECT k, to_json(setval) FROM %s WHERE k = ?", 0), row(0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]"));
    execute("INSERT INTO %s (k, setval) VALUES (?, ?)", 0, set());
    assertRows(execute("SELECT k, to_json(setval) FROM %s WHERE k = ?", 0), row(0, "null"));
    execute("INSERT INTO %s (k, frozensetval) VALUES (?, ?)", 0, set(UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9798"), (UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"))));
    assertRows(execute("SELECT k, to_json(frozensetval) FROM %s WHERE k = ?", 0), row(0, "[\"6bddc89a-5644-11e4-97fc-56847afe9798\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]"));
    execute("INSERT INTO %s (k, mapval) VALUES (?, ?)", 0, map("a", 1, "b", 2));
    assertRows(execute("SELECT k, to_json(mapval) FROM %s WHERE k = ?", 0), row(0, "{\"a\": 1, \"b\": 2}"));
    execute("INSERT INTO %s (k, mapval) VALUES (?, ?)", 0, map());
    assertRows(execute("SELECT k, to_json(mapval) FROM %s WHERE k = ?", 0), row(0, "null"));
    execute("INSERT INTO %s (k, frozenmapval) VALUES (?, ?)", 0, map("a", 1, "b", 2));
    assertRows(execute("SELECT k, to_json(frozenmapval) FROM %s WHERE k = ?", 0), row(0, "{\"a\": 1, \"b\": 2}"));
    execute("INSERT INTO %s (k, tupleval) VALUES (?, ?)", 0, tuple(1, "foobar", UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799")));
    assertRows(execute("SELECT k, to_json(tupleval) FROM %s WHERE k = ?", 0), row(0, "[1, \"foobar\", \"6bddc89a-5644-11e4-97fc-56847afe9799\"]"));
    execute("INSERT INTO %s (k, tupleval) VALUES (?, ?)", 0, tuple(1, "foobar", null));
    assertRows(execute("SELECT k, to_json(tupleval) FROM %s WHERE k = ?", 0), row(0, "[1, \"foobar\", null]"));
    execute("INSERT INTO %s (k, udtval) VALUES (?, {a: ?, b: ?, c: ?})", 0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"), set("foo", "bar"));
    assertRows(execute("SELECT k, to_json(udtval) FROM %s WHERE k = ?", 0), row(0, "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": [\"bar\", \"foo\"]}"));
    execute("INSERT INTO %s (k, udtval) VALUES (?, {a: ?, b: ?})", 0, 1, UUID.fromString("6bddc89a-5644-11e4-97fc-56847afe9799"));
    assertRows(execute("SELECT k, to_json(udtval) FROM %s WHERE k = ?", 0), row(0, "{\"a\": 1, \"b\": \"6bddc89a-5644-11e4-97fc-56847afe9799\", \"c\": null}"));
    execute("INSERT INTO %s (k, durationval) VALUES (?, 12µs)", 0);
    assertRows(execute("SELECT k, to_json(durationval) FROM %s WHERE k = ?", 0), row(0, "\"12us\""));
    execute("INSERT INTO %s (k, durationval) VALUES (?, P1Y1M2DT10H5M)", 0);
    assertRows(execute("SELECT k, to_json(durationval) FROM %s WHERE k = ?", 0), row(0, "\"1y1mo2d10h5m\""));
}
242100.1618184cassandra
/**
 * Randomized check that a {@code ProgressBarrier} collects at least the number of replica
 * acknowledgements required by each consistency level (ALL, QUORUM, LOCAL_QUORUM,
 * EACH_QUORUM, ONE) across randomly generated replication factors and cluster sizes.
 * Message delivery is stubbed so each peer randomly acks or times out; the set of
 * responders is then compared against the CL's minimum for the locked ranges' peers.
 *
 * @throws Throwable on any commit or assertion failure
 */
public void testProgressBarrier() throws Throwable {
    EntropySource rng = new PcgRSUFast(1L, 1L);
    Supplier<Boolean> respond = bools().toGenerator().bind(rng);
    // Random RF: either SimpleStrategy-like, NTS with uniform per-DC counts, or NTS
    // where the last DC gets a different (alternate) count.
    Supplier<TokenPlacementModel.ReplicationFactor> rfs = combine(ints(0, 3), ints(1, 5), bools(), ints(1, 5), (Integer dcs, Integer nodesPerDc, Boolean addAlternate, Integer nodesPerDcAlt) -> {
        if (dcs == 0)
            return new TokenPlacementModel.SimpleReplicationFactor(nodesPerDc);
        else if (addAlternate && nodesPerDcAlt.intValue() != nodesPerDc.intValue()) {
            int[] perDc = new int[dcs + 1];
            Arrays.fill(perDc, nodesPerDc);
            perDc[perDc.length - 1] = nodesPerDcAlt;
            return new TokenPlacementModel.NtsReplicationFactor(perDc);
        } else {
            return new TokenPlacementModel.NtsReplicationFactor(dcs, nodesPerDc);
        }
    }).toGenerator().bind(rng);
    Supplier<Integer> nodes = ints(15, 20).toGenerator().bind(rng);
    Supplier<ConsistencyLevel> cls = Surjections.pick(ConsistencyLevel.ALL, ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE).toGenerator().bind(rng);
    TokenPlacementModel.NodeFactory nodeFactory = TokenPlacementModel.nodeFactory();
    for (int run = 0; run < 100; run++) {
        TokenPlacementModel.ReplicationFactor rf = rfs.get();
        try (CMSTestBase.CMSSut sut = new CMSTestBase.CMSSut(AtomicLongBackedProcessor::new, false, rf)) {
            List<TokenPlacementModel.Node> allNodes = new ArrayList<>();
            TokenPlacementModel.Node node = null;
            // Ensure the cluster is at least large enough to satisfy the RF.
            int nodesInCluster = Math.max(rf.total(), nodes.get());
            for (int i = 1; i <= nodesInCluster; i++) {
                node = nodeFactory.make(i, (i % rf.dcs()) + 1, 1);
                allNodes.add(node);
                sut.service.commit(new Register(new NodeAddresses(node.addr()), new Location(node.dc(), node.rack()), NodeVersion.CURRENT));
                // All but the last node join unsafely; the last one goes through PrepareJoin
                // below so there is an in-progress sequence to advance.
                if (i < nodesInCluster)
                    sut.service.commit(new UnsafeJoin(node.nodeId(), Collections.singleton(node.longToken()), ClusterMetadataService.instance().placementProvider()));
            }
            sut.service.commit(new PrepareJoin(node.nodeId(), Collections.singleton(node.longToken()), ClusterMetadataService.instance().placementProvider(), true, false));
            for (int check = 0; check < 10; check++) {
                ClusterMetadata metadata = ClusterMetadata.current();
                ConsistencyLevel cl = cls.get();
                Set<InetAddressAndPort> responded = new ConcurrentSkipListSet<>();
                // Stub delivery: each callback either acks (and is recorded) or times out.
                MessageDelivery delivery = new MessageDelivery() {

                    public <REQ, RSP> void sendWithCallback(Message<REQ> message, InetAddressAndPort to, RequestCallback<RSP> cb) {
                        if (respond.get()) {
                            responded.add(to);
                            cb.onResponse((Message<RSP>) message.responseWith(message.epoch()));
                        } else {
                            cb.onFailure(message.from(), RequestFailureReason.TIMEOUT);
                        }
                    }

                    public <REQ> void send(Message<REQ> message, InetAddressAndPort to) {
                    }

                    public <REQ, RSP> void sendWithCallback(Message<REQ> message, InetAddressAndPort to, RequestCallback<RSP> cb, ConnectionType specifyConnection) {
                    }

                    public <REQ, RSP> Future<Message<RSP>> sendWithResult(Message<REQ> message, InetAddressAndPort to) {
                        return null;
                    }

                    public <V> void respond(V response, Message<?> message) {
                    }
                };
                ProgressBarrier progressBarrier = ((MultiStepOperation<Epoch>) metadata.inProgressSequences.get(node.nodeId())).advance(metadata.epoch).barrier().withMessagingService(delivery);
                progressBarrier.await(cl, metadata);
                String dc = metadata.directory.location(node.nodeId()).datacenter;
                switch(cl) {
                    case ALL:
                        {
                            Set<InetAddressAndPort> replicas = metadata.lockedRanges.locked.get(LockedRanges.keyFor(metadata.epoch)).toPeers(rf.asKeyspaceParams().replication, metadata.placements, metadata.directory).stream().map(n -> metadata.directory.getNodeAddresses(n).broadcastAddress).collect(Collectors.toSet());
                            Set<InetAddressAndPort> collected = responded.stream().filter(replicas::contains).collect(Collectors.toSet());
                            int expected = rf.total();
                            Assert.assertTrue(String.format("Should have collected at least %d nodes but got %d." + "\nRF: %s" + "\nReplicas: %s" + "\nNodes: %s", expected, collected.size(), rf, replicas, collected), collected.size() >= expected);
                            break;
                        }
                    case QUORUM:
                        {
                            Set<InetAddressAndPort> replicas = metadata.lockedRanges.locked.get(LockedRanges.keyFor(metadata.epoch)).toPeers(rf.asKeyspaceParams().replication, metadata.placements, metadata.directory).stream().map(n -> metadata.directory.getNodeAddresses(n).broadcastAddress).collect(Collectors.toSet());
                            Set<InetAddressAndPort> collected = responded.stream().filter(replicas::contains).collect(Collectors.toSet());
                            int expected = rf.total() / 2 + 1;
                            Assert.assertTrue(String.format("Should have collected at least %d nodes but got %d." + "\nRF: %s" + "\nReplicas: %s" + "\nNodes: %s", expected, collected.size(), rf, replicas, collected), collected.size() >= expected);
                            break;
                        }
                    case LOCAL_QUORUM:
                        {
                            List<InetAddressAndPort> replicas = new ArrayList<>(metadata.lockedRanges.locked.get(LockedRanges.keyFor(metadata.epoch)).toPeers(rf.asKeyspaceParams().replication, metadata.placements, metadata.directory).stream().filter((n) -> metadata.directory.location(n).datacenter.equals(dc)).map(n -> metadata.directory.getNodeAddresses(n).broadcastAddress).collect(Collectors.toSet()));
                            replicas.sort(InetAddressAndPort::compareTo);
                            Set<InetAddressAndPort> collected = responded.stream().filter(replicas::contains).collect(Collectors.toSet());
                            int expected;
                            if (rf instanceof TokenPlacementModel.SimpleReplicationFactor)
                                expected = rf.total() / 2 + 1;
                            else
                                expected = rf.asMap().get(dc).totalCount / 2 + 1;
                            Assert.assertTrue(String.format("Should have collected at least %d nodes but got %d." + "\nRF: %s" + "\nReplicas: %s" + "\nCollected: %s. Responded: %s", expected, collected.size(), rf, replicas, collected, responded), collected.size() >= expected);
                            break;
                        }
                    case EACH_QUORUM:
                        {
                            Map<String, Integer> byDc = new HashMap<>();
                            metadata.lockedRanges.locked.get(LockedRanges.keyFor(metadata.epoch)).toPeers(rf.asKeyspaceParams().replication, metadata.placements, metadata.directory).forEach(n -> byDc.compute(metadata.directory.location(n).datacenter, (k, v) -> v == null ? 1 : v + 1));
                            if (rf instanceof TokenPlacementModel.SimpleReplicationFactor) {
                                int actual = byDc.get(dc);
                                int expected = rf.asMap().get(dc).totalCount / 2 + 1;
                                // Message fixed: "Shuold" -> "Should", and report the compared
                                // quantity (actual) rather than the number of DCs (byDc.size()).
                                Assert.assertTrue(String.format("Should have collected at least %d nodes, but got %d." + "\nRF: %s" + "\nNodes: %s", expected, actual, rf, byDc), actual >= expected);
                            } else {
                                for (Map.Entry<String, Integer> e : byDc.entrySet()) {
                                    int actual = e.getValue();
                                    int expected = rf.asMap().get(e.getKey()).totalCount / 2 + 1;
                                    Assert.assertTrue(String.format("Should have collected at least %d nodes, but got %d." + "\nRF: %s" + "\nNodes: %s", expected, actual, rf, byDc), actual >= expected);
                                }
                            }
                            break;
                        }
                    case ONE:
                        Set<InetAddressAndPort> replicas = metadata.lockedRanges.locked.get(LockedRanges.keyFor(metadata.epoch)).toPeers(rf.asKeyspaceParams().replication, metadata.placements, metadata.directory).stream().map(n -> metadata.directory.getNodeAddresses(n).broadcastAddress).collect(Collectors.toSet());
                        Assert.assertTrue(String.format("Should have collected at least one of the replicas %s, but got %s." + "\nRF: %s.\nNodes: %s.", replicas, responded, rf, allNodes), responded.stream().anyMatch(replicas::contains));
                        break;
                }
            }
        }
    }
}
246884.581166elasticsearch
/**
 * Verifies a terms aggregation under a nested agg with a reverse_nested back to the
 * root: for each of the nine "field2" username buckets, checks the bucket key, its
 * doc count, and the exact ordered key/doc-count pairs of the root-level "field1"
 * terms sub-aggregation. The nine near-identical assertion stanzas are factored
 * into {@link #assertUserBucket} / {@link #assertTagBuckets}; the assertions are
 * unchanged.
 *
 * @throws Exception on search failure
 */
public void testSimpleReverseNestedToRoot() throws Exception {
    assertNoFailuresAndResponse(prepareSearch("idx1").addAggregation(nested("nested1", "nested1").subAggregation(terms("field2").field("nested1.field2").subAggregation(reverseNested("nested1_to_field1").subAggregation(terms("field1").field("field1").collectMode(randomFrom(SubAggCollectionMode.values())))))), response -> {
        Nested nested = response.getAggregations().get("nested1");
        assertThat(nested, notNullValue());
        assertThat(nested.getName(), equalTo("nested1"));
        assertThat(nested.getDocCount(), equalTo(25L));
        assertThat(nested.getAggregations().asList().isEmpty(), is(false));
        Terms usernames = nested.getAggregations().get("field2");
        assertThat(usernames, notNullValue());
        assertThat(usernames.getBuckets().size(), equalTo(9));
        List<Terms.Bucket> usernameBuckets = new ArrayList<>(usernames.getBuckets());
        // Bucket "1" additionally checks the reverse_nested _count property and that
        // getProperty() resolves to the same sub-aggregation instance.
        Terms.Bucket bucket = usernameBuckets.get(0);
        assertThat(bucket.getKeyAsString(), equalTo("1"));
        assertThat(bucket.getDocCount(), equalTo(6L));
        ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
        assertThat(((InternalAggregation) reverseNested).getProperty("_count"), equalTo(5L));
        Terms tags = reverseNested.getAggregations().get("field1");
        assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(tags));
        assertTagBuckets(tags, "c", 4L, "a", 3L, "e", 2L, "b", 1L, "d", 1L, "x", 1L);
        // Remaining buckets: key, doc count, then ordered (tag, docCount) pairs.
        assertUserBucket(usernameBuckets.get(1), "4", 4L, "a", 3L, "b", 2L, "c", 2L, "d", 1L, "e", 1L);
        assertUserBucket(usernameBuckets.get(2), "7", 3L, "c", 2L, "d", 2L, "e", 2L, "a", 1L, "b", 1L);
        assertUserBucket(usernameBuckets.get(3), "2", 2L, "a", 2L, "c", 2L, "b", 1L);
        assertUserBucket(usernameBuckets.get(4), "3", 2L, "a", 2L, "b", 1L, "c", 1L);
        assertUserBucket(usernameBuckets.get(5), "5", 2L, "b", 1L, "c", 1L, "d", 1L, "z", 1L);
        assertUserBucket(usernameBuckets.get(6), "6", 2L, "c", 2L, "b", 1L, "d", 1L, "y", 1L);
        assertUserBucket(usernameBuckets.get(7), "8", 2L, "c", 2L, "d", 1L, "e", 1L, "x", 1L);
        assertUserBucket(usernameBuckets.get(8), "9", 2L, "c", 1L, "d", 1L, "e", 1L, "z", 1L);
    });
}

/** Asserts a username bucket's key/doc count and its reverse-nested "field1" tag buckets. */
private static void assertUserBucket(Terms.Bucket bucket, String expectedKey, long expectedDocCount, Object... expectedTagKeyCountPairs) {
    assertThat(bucket.getKeyAsString(), equalTo(expectedKey));
    assertThat(bucket.getDocCount(), equalTo(expectedDocCount));
    ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
    Terms tags = reverseNested.getAggregations().get("field1");
    assertTagBuckets(tags, expectedTagKeyCountPairs);
}

/** Asserts the exact size and ordered (key, docCount) pairs of a terms aggregation. */
private static void assertTagBuckets(Terms tags, Object... expectedKeyCountPairs) {
    List<Terms.Bucket> tagsBuckets = new ArrayList<>(tags.getBuckets());
    assertThat(tagsBuckets.size(), equalTo(expectedKeyCountPairs.length / 2));
    for (int i = 0; i < tagsBuckets.size(); i++) {
        assertThat(tagsBuckets.get(i).getKeyAsString(), equalTo(expectedKeyCountPairs[2 * i]));
        assertThat(tagsBuckets.get(i).getDocCount(), equalTo(expectedKeyCountPairs[2 * i + 1]));
    }
}
244517.113195elasticsearch
/**
 * Verifies reverse_nested correctness when the same parent document contributes to
 * multiple buckets of a nested terms aggregation: a single root document with three
 * category names must yield one bucket per category, each "seeing" the full parent
 * document again through reverse_nested.
 */
public void testSameParentDocHavingMultipleBuckets() throws Exception {
    // Strict mapping: nested "category" (keyword name) and nested "sku" (keyword sku_type)
    // where "sku" itself contains a second-level nested "colors" field.
    XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").field("dynamic", "strict").startObject("properties").startObject("id").field("type", "long").endObject().startObject("category").field("type", "nested").startObject("properties").startObject("name").field("type", "keyword").endObject().endObject().endObject().startObject("sku").field("type", "nested").startObject("properties").startObject("sku_type").field("type", "keyword").endObject().startObject("colors").field("type", "nested").startObject("properties").startObject("name").field("type", "keyword").endObject().endObject().endObject().endObject().endObject().endObject().endObject().endObject();
    assertAcked(prepareCreate("idx3").setSettings(indexSettings(1, 0)).setMapping(mapping));
    // Single document: 5 sku entries (3 of type "bar1", 2 of type "bar2"), 8 colors total
    // across the bar1 skus, and 3 categories ("abc", "klm", "xyz").
    prepareIndex("idx3").setId("1").setRefreshPolicy(IMMEDIATE).setSource(jsonBuilder().startObject().startArray("sku").startObject().field("sku_type", "bar1").startArray("colors").startObject().field("name", "red").endObject().startObject().field("name", "green").endObject().startObject().field("name", "yellow").endObject().endArray().endObject().startObject().field("sku_type", "bar1").startArray("colors").startObject().field("name", "red").endObject().startObject().field("name", "blue").endObject().startObject().field("name", "white").endObject().endArray().endObject().startObject().field("sku_type", "bar1").startArray("colors").startObject().field("name", "black").endObject().startObject().field("name", "blue").endObject().endArray().endObject().startObject().field("sku_type", "bar2").startArray("colors").startObject().field("name", "orange").endObject().endArray().endObject().startObject().field("sku_type", "bar2").startArray("colors").startObject().field("name", "pink").endObject().endArray().endObject().endArray().startArray("category").startObject().field("name", "abc").endObject().startObject().field("name", "klm").endObject().startObject().field("name", "xyz").endObject().endArray().endObject()).get();
    // First query: nested(category) -> terms(category.name) -> reverse_nested(root)
    //              -> nested(sku) -> filter(sku_type == bar1) -> value_count(sku_type).
    assertNoFailuresAndResponse(prepareSearch("idx3").addAggregation(nested("nested_0", "category").subAggregation(terms("group_by_category").field("category.name").subAggregation(reverseNested("to_root").subAggregation(nested("nested_1", "sku").subAggregation(filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation(count("sku_count").field("sku.sku_type"))))))), response -> {
        assertHitCount(response, 1);
        Nested nested0 = response.getAggregations().get("nested_0");
        assertThat(nested0.getDocCount(), equalTo(3L));
        Terms terms = nested0.getAggregations().get("group_by_category");
        assertThat(terms.getBuckets().size(), equalTo(3));
        // Each category bucket maps back to the single root doc, which has 5 skus,
        // 3 of which match the "bar1" filter.
        for (String bucketName : new String[] { "abc", "klm", "xyz" }) {
            logger.info("Checking results for bucket {}", bucketName);
            Terms.Bucket bucket = terms.getBucketByKey(bucketName);
            assertThat(bucket.getDocCount(), equalTo(1L));
            ReverseNested toRoot = bucket.getAggregations().get("to_root");
            assertThat(toRoot.getDocCount(), equalTo(1L));
            Nested nested1 = toRoot.getAggregations().get("nested_1");
            assertThat(nested1.getDocCount(), equalTo(5L));
            Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
            assertThat(filterByBar.getDocCount(), equalTo(3L));
            ValueCount barCount = filterByBar.getAggregations().get("sku_count");
            assertThat(barCount.getValue(), equalTo(3L));
        }
    });
    // Second query adds a second-level nested step: descend into sku.colors, filter on
    // color "red", then reverse_nested back up to the "sku" level and count sku_type there.
    assertNoFailuresAndResponse(prepareSearch("idx3").addAggregation(nested("nested_0", "category").subAggregation(terms("group_by_category").field("category.name").subAggregation(reverseNested("to_root").subAggregation(nested("nested_1", "sku").subAggregation(filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation(nested("nested_2", "sku.colors").subAggregation(filter("filter_sku_color", termQuery("sku.colors.name", "red")).subAggregation(reverseNested("reverse_to_sku").path("sku").subAggregation(count("sku_count").field("sku.sku_type")))))))))), response -> {
        assertHitCount(response, 1);
        Nested nested0 = response.getAggregations().get("nested_0");
        assertThat(nested0.getDocCount(), equalTo(3L));
        Terms terms = nested0.getAggregations().get("group_by_category");
        assertThat(terms.getBuckets().size(), equalTo(3));
        for (String bucketName : new String[] { "abc", "klm", "xyz" }) {
            logger.info("Checking results for bucket {}", bucketName);
            Terms.Bucket bucket = terms.getBucketByKey(bucketName);
            assertThat(bucket.getDocCount(), equalTo(1L));
            ReverseNested toRoot = bucket.getAggregations().get("to_root");
            assertThat(toRoot.getDocCount(), equalTo(1L));
            Nested nested1 = toRoot.getAggregations().get("nested_1");
            assertThat(nested1.getDocCount(), equalTo(5L));
            Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
            assertThat(filterByBar.getDocCount(), equalTo(3L));
            // The 3 "bar1" skus carry 8 color entries in total; "red" appears in 2 of them.
            Nested nested2 = filterByBar.getAggregations().get("nested_2");
            assertThat(nested2.getDocCount(), equalTo(8L));
            Filter filterBarColor = nested2.getAggregations().get("filter_sku_color");
            assertThat(filterBarColor.getDocCount(), equalTo(2L));
            // reverse_nested with an explicit path joins back to the sku level, not the root.
            ReverseNested reverseToBar = filterBarColor.getAggregations().get("reverse_to_sku");
            assertThat(reverseToBar.getDocCount(), equalTo(2L));
            ValueCount barCount = reverseToBar.getAggregations().get("sku_count");
            assertThat(barCount.getValue(), equalTo(2L));
        }
    });
}
243744.8122150elasticsearch
/**
 * Fault-injection test: indexes and searches against a directory that randomly throws
 * IOExceptions (both on regular operations and on file open), and verifies that
 * searches either succeed with the expected hits or fail in the tolerated ways.
 *
 * Fix: the previously-empty {@code catch (ElasticsearchException ex)} swallowed the
 * exception with no explanation; the variable is now named {@code ignored} and the
 * intent (index failures are expected under injected IO errors) is documented.
 */
public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
    String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("properties").startObject("test").field("type", "keyword").endObject().endObject().endObject());
    // Randomly pick the failure profile: exceptions on open only, on read/write only,
    // on both, or (rarely) none at all.
    final double exceptionRate;
    final double exceptionOnOpenRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                exceptionOnOpenRate = 1.0 / between(5, 100);
                exceptionRate = 0.0d;
            } else {
                exceptionRate = 1.0 / between(5, 100);
                exceptionOnOpenRate = 0.0d;
            }
        } else {
            exceptionOnOpenRate = 1.0 / between(5, 100);
            exceptionRate = 1.0 / between(5, 100);
        }
    } else {
        exceptionRate = 0d;
        exceptionOnOpenRate = 0d;
    }
    final boolean createIndexWithoutErrors = randomBoolean();
    int numInitialDocs = 0;
    if (createIndexWithoutErrors) {
        // Create and seed the index cleanly first; the close/open cycle forces a reopen
        // so the fault-injecting directory settings take effect afterwards.
        Settings.Builder settings = Settings.builder().put("index.number_of_replicas", numberOfReplicas());
        logger.info("creating index: [test] using settings: [{}]", settings.build());
        indicesAdmin().prepareCreate("test").setSettings(settings).setMapping(mapping).get();
        numInitialDocs = between(10, 100);
        ensureGreen();
        for (int i = 0; i < numInitialDocs; i++) {
            prepareIndex("test").setId("init" + i).setSource("test", "init").get();
        }
        indicesAdmin().prepareRefresh("test").execute().get();
        indicesAdmin().prepareFlush("test").execute().get();
        indicesAdmin().prepareClose("test").execute().get();
        indicesAdmin().prepareOpen("test").execute().get();
    } else {
        // Create the index with fault injection already active; disable check-index on
        // close since corruption is expected.
        Settings.Builder settings = Settings.builder().put("index.number_of_replicas", randomIntBetween(0, 1)).put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false).put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate).put(MockFSDirectoryFactory.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate);
        logger.info("creating index: [test] using settings: [{}]", settings.build());
        indicesAdmin().prepareCreate("test").setSettings(settings).setMapping(mapping).get();
    }
    ClusterHealthResponse clusterHealthResponse = clusterAdmin().health(new ClusterHealthRequest(new String[] {}).waitForYellowStatus().masterNodeTimeout(TimeValue.timeValueSeconds(5)).timeout(TimeValue.timeValueSeconds(5))).get();
    final int numDocs;
    final boolean expectAllShardsFailed;
    if (clusterHealthResponse.isTimedOut()) {
        // Injection was severe enough that shards never became available.
        logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
        numDocs = 1;
        expectAllShardsFailed = true;
    } else {
        numDocs = between(10, 100);
        expectAllShardsFailed = false;
    }
    int numCreated = 0;
    // Track which docs actually made it in, so per-doc searches know what to expect.
    boolean[] added = new boolean[numDocs];
    for (int i = 0; i < numDocs; i++) {
        added[i] = false;
        try {
            DocWriteResponse indexResponse = prepareIndex("test").setId(Integer.toString(i)).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
            if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
                numCreated++;
                added[i] = true;
            }
        } catch (ElasticsearchException ignored) {
            // Expected: the mock directory randomly throws IOExceptions, so individual
            // index requests may fail; added[i] stays false and searches account for it.
        }
    }
    ESIntegTestCase.NumShards numShards = getNumShards("test");
    logger.info("Start Refresh");
    final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get();
    final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
    final int numSearches = scaledRandomIntBetween(10, 20);
    final int finalNumCreated = numCreated;
    final int finalNumInitialDocs = numInitialDocs;
    for (int i = 0; i < numSearches; i++) {
        try {
            int docToQuery = between(0, numDocs - 1);
            int expectedResults = added[docToQuery] ? 1 : 0;
            logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
            // Hit counts are only reliable when every primary responded and refresh succeeded.
            assertResponse(prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults), response -> {
                logger.info("Successful shards: [{}]  numShards: [{}]", response.getSuccessfulShards(), numShards.numPrimaries);
                if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) {
                    assertResultsAndLogOnFailure(expectedResults, response);
                }
            });
            assertResponse(prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSize(numCreated + numInitialDocs).addSort("_uid", SortOrder.ASC), response -> {
                logger.info("Match all Successful shards: [{}]  numShards: [{}]", response.getSuccessfulShards(), numShards.numPrimaries);
                if (response.getSuccessfulShards() == numShards.numPrimaries && refreshFailed == false) {
                    assertResultsAndLogOnFailure(finalNumCreated + finalNumInitialDocs, response);
                }
            });
        } catch (SearchPhaseExecutionException ex) {
            logger.info("SearchPhaseException: [{}]", ex.getMessage());
            // Only tolerate total search failure when the cluster state makes it expected.
            if ((expectAllShardsFailed || refreshResponse.getSuccessfulShards() == 0 || ex.getMessage().contains("all shards failed")) == false) {
                throw ex;
            }
        }
    }
    if (createIndexWithoutErrors) {
        // Reopen to force new segment readers, then verify the cleanly-indexed initial
        // docs survived the fault injection intact.
        indicesAdmin().prepareClose("test").execute().get();
        indicesAdmin().prepareOpen("test").execute().get();
        ensureGreen();
        assertHitCountAndNoFailures(prepareSearch().setQuery(QueryBuilders.matchQuery("test", "init")), numInitialDocs);
    }
}
244204.6115169elasticsearch
/**
 * Verifies snapshot/restore handling of the {@code include_global_state} flag:
 * cluster-state items (index template, ingest pipeline, stored script) are captured
 * and restored only when the flag is set on both the snapshot and the restore,
 * while index data restores independently of it.
 */
public void testIncludeGlobalState() throws Exception {
    createRepository("test-repo", "fs");
    // Randomly choose which global-state items to exercise; guarantee at least one.
    boolean testTemplate = randomBoolean();
    boolean testPipeline = randomBoolean();
    boolean testScript = (testTemplate == false && testPipeline == false) || randomBoolean();
    if (testTemplate) {
        logger.info("-->  creating test template");
        assertThat(indicesAdmin().preparePutTemplate("test-template").setPatterns(Collections.singletonList("te*")).setMapping(XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties").startObject("field1").field("type", "text").field("store", true).endObject().startObject("field2").field("type", "keyword").field("store", true).endObject().endObject().endObject().endObject()).get().isAcknowledged(), equalTo(true));
    }
    if (testPipeline) {
        logger.info("-->  creating test pipeline");
        BytesReference pipelineSource = BytesReference.bytes(jsonBuilder().startObject().field("description", "my_pipeline").startArray("processors").startObject().startObject("test").endObject().endObject().endArray().endObject());
        assertAcked(clusterAdmin().preparePutPipeline("barbaz", pipelineSource, XContentType.JSON).get());
    }
    if (testScript) {
        logger.info("-->  creating test script");
        assertAcked(clusterAdmin().preparePutStoredScript().setId("foobar").setContent(new BytesArray("{\"script\": { \"lang\": \"" + MockScriptEngine.NAME + "\", \"source\": \"1\"} }"), XContentType.JSON));
    }
    // Snapshot 1: no indices, no global state — expect zero shards but SUCCESS.
    logger.info("--> snapshot without global state");
    CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get();
    assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
    assertThat(getSnapshot("test-repo", "test-snap-no-global-state").state(), equalTo(SnapshotState.SUCCESS));
    SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-no-global-state").get();
    assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1));
    SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0);
    assertThat(snapshotStatus.includeGlobalState(), equalTo(false));
    // Snapshot 2: no indices, with global state.
    logger.info("--> snapshot with global state");
    createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-with-global-state").setIndices().setIncludeGlobalState(true).setWaitForCompletion(true).get();
    assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
    assertThat(getSnapshot("test-repo", "test-snap-with-global-state").state(), equalTo(SnapshotState.SUCCESS));
    snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-with-global-state").get();
    assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1));
    snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0);
    assertThat(snapshotStatus.includeGlobalState(), equalTo(true));
    // Wipe the global-state items so restores start from a clean slate.
    if (testTemplate) {
        logger.info("-->  delete test template");
        cluster().wipeTemplates("test-template");
        GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get();
        assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
    }
    if (testPipeline) {
        logger.info("-->  delete test pipeline");
        assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("barbaz")).get());
    }
    if (testScript) {
        logger.info("-->  delete test script");
        assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar").get());
    }
    // Restoring the no-global-state snapshot must not bring the template back.
    logger.info("--> try restoring from snapshot without global state");
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state").setWaitForCompletion(true).setRestoreGlobalState(false).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
    logger.info("--> check that template wasn't restored");
    GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get();
    assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
    // Restoring the with-global-state snapshot must bring back whichever items were created.
    logger.info("--> restore cluster state");
    restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
    if (testTemplate) {
        logger.info("--> check that template is restored");
        getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get();
        assertIndexTemplateExists(getIndexTemplatesResponse, "test-template");
    }
    if (testPipeline) {
        logger.info("--> check that pipeline is restored");
        GetPipelineResponse getPipelineResponse = clusterAdmin().prepareGetPipeline("barbaz").get();
        assertTrue(getPipelineResponse.isFound());
    }
    if (testScript) {
        logger.info("--> check that script is restored");
        GetStoredScriptResponse getStoredScriptResponse = clusterAdmin().prepareGetStoredScript("foobar").get();
        assertNotNull(getStoredScriptResponse.getSource());
    }
    // Snapshot 3: index data included, global state excluded.
    createIndexWithRandomDocs("test-idx", 100);
    logger.info("--> snapshot without global state but with indices");
    createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index").setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get();
    assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    assertThat(getSnapshot("test-repo", "test-snap-no-global-state-with-index").state(), equalTo(SnapshotState.SUCCESS));
    logger.info("-->  delete global state and index ");
    cluster().wipeIndices("test-idx");
    if (testTemplate) {
        cluster().wipeTemplates("test-template");
    }
    if (testPipeline) {
        assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("barbaz")).get());
    }
    if (testScript) {
        assertAcked(clusterAdmin().prepareDeleteStoredScript("foobar").get());
    }
    getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get();
    assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
    // Restore snapshot 3: index data must come back, global state must not.
    logger.info("--> try restoring index and cluster state from snapshot without global state");
    restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index").setWaitForCompletion(true).setRestoreGlobalState(false).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
    logger.info("--> check that global state wasn't restored but index was");
    getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get();
    assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
    assertFalse(clusterAdmin().prepareGetPipeline("barbaz").get().isFound());
    assertNull(clusterAdmin().prepareGetStoredScript("foobar").get().getSource());
    assertDocCount("test-idx", 100L);
}
243536.9927147elasticsearch
/**
 * Resolves the requested alias actions against the current cluster state and submits
 * the resulting concrete {@link AliasAction}s as a single cluster-state update.
 *
 * Handles the data-stream vs. regular-index split: ADD on a data-stream expression
 * becomes a data-stream alias action (with routing/hidden options rejected), REMOVE
 * may touch both data-stream aliases and index aliases, and backing indices of data
 * streams are rejected outright. Fails the listener early if request validation
 * raises, and throws {@code AliasesNotFoundException} when actions were supplied
 * but nothing resolved.
 */
protected void masterOperation(Task task, final IndicesAliasesRequest request, final ClusterState state, final ActionListener<IndicesAliasesResponse> listener) {
    List<AliasActions> actions = request.aliasActions();
    // Concrete, fully-resolved actions to apply in one cluster-state update.
    List<AliasAction> finalActions = new ArrayList<>();
    // Per-request-action results, reported back in the response.
    List<AliasActionResult> actionResults = new ArrayList<>();
    // Original alias expressions, used for the not-found error below.
    Set<String> aliases = new HashSet<>();
    for (AliasActions action : actions) {
        int numAliasesRemoved = 0;
        List<String> resolvedIndices = new ArrayList<>();
        List<String> concreteDataStreams = indexNameExpressionResolver.dataStreamNames(state, request.indicesOptions(), action.indices());
        final Index[] concreteIndices;
        if (concreteDataStreams.size() != 0) {
            // The expression matched at least one data stream; split the match into
            // data streams vs. plain (non-backing) indices.
            Index[] unprocessedConcreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), true, action.indices());
            List<Index> nonBackingIndices = Arrays.stream(unprocessedConcreteIndices).filter(index -> {
                var ia = state.metadata().getIndicesLookup().get(index.getName());
                return ia.getParentDataStream() == null;
            }).toList();
            concreteIndices = nonBackingIndices.toArray(Index[]::new);
            switch(action.actionType()) {
                case ADD ->
                    {
                        // Data-stream aliases don't support routing or hidden options.
                        if (action.routing() != null) {
                            throw new IllegalArgumentException("aliases that point to data streams don't support routing");
                        }
                        if (action.indexRouting() != null) {
                            throw new IllegalArgumentException("aliases that point to data streams don't support index_routing");
                        }
                        if (action.searchRouting() != null) {
                            throw new IllegalArgumentException("aliases that point to data streams don't support search_routing");
                        }
                        if (action.isHidden() != null) {
                            throw new IllegalArgumentException("aliases that point to data streams don't support is_hidden");
                        }
                        // ADD must target either data streams or indices, never a mix.
                        if (nonBackingIndices.isEmpty() == false) {
                            throw new IllegalArgumentException("expressions " + Arrays.toString(action.indices()) + " that match with both data streams and regular indices are disallowed");
                        }
                        for (String dataStreamName : concreteDataStreams) {
                            for (String alias : concreteDataStreamAliases(action, state.metadata(), dataStreamName)) {
                                finalActions.add(new AddDataStreamAlias(alias, dataStreamName, action.writeIndex(), action.filter()));
                            }
                        }
                        actionResults.add(AliasActionResult.buildSuccess(concreteDataStreams, action));
                        // Fully handled as a data-stream action; skip the index path below.
                        continue;
                    }
                case REMOVE ->
                    {
                        for (String dataStreamName : concreteDataStreams) {
                            for (String alias : concreteDataStreamAliases(action, state.metadata(), dataStreamName)) {
                                finalActions.add(new AliasAction.RemoveDataStreamAlias(alias, dataStreamName, action.mustExist()));
                                numAliasesRemoved++;
                            }
                        }
                        if (nonBackingIndices.isEmpty() == false) {
                            // Mixed match: fall through to also remove aliases on the
                            // plain indices; record the data streams as already resolved.
                            resolvedIndices.addAll(concreteDataStreams);
                        } else {
                            actionResults.add(AliasActionResult.build(concreteDataStreams, action, numAliasesRemoved));
                            continue;
                        }
                    }
                default ->
                    throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]");
            }
        } else {
            concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), false, action.indices());
        }
        // Backing indices of data streams can never carry aliases directly.
        for (Index concreteIndex : concreteIndices) {
            IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(concreteIndex.getName());
            assert indexAbstraction != null : "invalid cluster metadata. index [" + concreteIndex.getName() + "] was not found";
            if (indexAbstraction.getParentDataStream() != null) {
                throw new IllegalArgumentException("The provided expressions [" + String.join(",", action.indices()) + "] match a backing index belonging to data stream [" + indexAbstraction.getParentDataStream().getName() + "]. Data stream backing indices don't support aliases.");
            }
        }
        final Optional<Exception> maybeException = requestValidators.validateRequest(request, state, concreteIndices);
        if (maybeException.isPresent()) {
            listener.onFailure(maybeException.get());
            return;
        }
        Collections.addAll(aliases, action.getOriginalAliases());
        // Single timestamp so date-math alias names resolve consistently across indices.
        long now = System.currentTimeMillis();
        for (final Index index : concreteIndices) {
            switch(action.actionType()) {
                case ADD:
                    for (String alias : concreteAliases(action, state.metadata(), index.getName())) {
                        String resolvedName = IndexNameExpressionResolver.resolveDateMathExpression(alias, now);
                        // System aliases are forced hidden regardless of the request.
                        finalActions.add(new AliasAction.Add(index.getName(), resolvedName, action.filter(), action.indexRouting(), action.searchRouting(), action.writeIndex(), systemIndices.isSystemName(resolvedName) ? Boolean.TRUE : action.isHidden()));
                    }
                    break;
                case REMOVE:
                    for (String alias : concreteAliases(action, state.metadata(), index.getName())) {
                        finalActions.add(new AliasAction.Remove(index.getName(), alias, action.mustExist()));
                        numAliasesRemoved++;
                    }
                    break;
                case REMOVE_INDEX:
                    finalActions.add(new AliasAction.RemoveIndex(index.getName()));
                    break;
                default:
                    throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]");
            }
        }
        Arrays.stream(concreteIndices).map(Index::getName).forEach(resolvedIndices::add);
        actionResults.add(AliasActionResult.build(resolvedIndices, action, numAliasesRemoved));
    }
    // Actions were requested but none resolved to anything concrete.
    if (finalActions.isEmpty() && false == actions.isEmpty()) {
        throw new AliasesNotFoundException(aliases.toArray(new String[aliases.size()]));
    }
    request.aliasActions().clear();
    IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(unmodifiableList(finalActions), unmodifiableList(actionResults)).ackTimeout(request.ackTimeout()).masterNodeTimeout(request.masterNodeTimeout());
    indexAliasesService.indicesAliases(updateRequest, listener.delegateResponse((l, e) -> {
        logger.debug("failed to perform aliases", e);
        l.onFailure(e);
    }));
}
242277.1414196elasticsearch
// Integration-style test of TransportSearchAction.collectSearchShards across several mock
// remote clusters. Scenarios exercised, in order:
//   1. happy path — every cluster returns shard info, nothing is SKIPPED;
//   2. a fatal per-cluster failure ("index_not_found") — every cluster ends up FAILED and
//      the overall listener receives a RemoteTransportException with NOT_FOUND status;
//   3. some clusters disconnected while skip_unavailable=false — overall failure with a
//      NodeDisconnectedException cause;
//   4. the same disconnected clusters flipped to skip_unavailable=true — they are reported
//      as SKIPPED and simply omitted from the response map;
//   5. after clearing the fail-to-send rules, connections recover and all clusters respond.
public void testCollectSearchShards() throws Exception {
    int numClusters = randomIntBetween(2, 10);
    DiscoveryNode[] nodes = new DiscoveryNode[numClusters];
    Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
    Settings.Builder builder = Settings.builder();
    // Starts one mock transport per remote cluster and registers each under the alias
    // "remote<i>" in `builder`; fills `nodes` and `remoteIndicesByCluster` as side effects.
    MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false);
    Settings settings = builder.build();
    try (MockTransportService service = MockTransportService.createNewService(settings, VersionInformation.CURRENT, TransportVersion.current(), threadPool, null)) {
        service.start();
        service.acceptIncomingRequests();
        TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0);
        RemoteClusterService remoteClusterService = service.getRemoteClusterService();
        // Scenario 1: all clusters reachable — expect one SearchShardsResponse per cluster.
        {
            final CountDownLatch latch = new CountDownLatch(1);
            AtomicReference<Map<String, SearchShardsResponse>> response = new AtomicReference<>();
            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
            TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, new MatchAllQueryBuilder(), randomBoolean(), null, remoteIndicesByCluster, clusters, timeProvider, service, new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(response::set), latch));
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertNotNull(response.get());
            Map<String, SearchShardsResponse> map = response.get();
            assertEquals(numClusters, map.size());
            for (int i = 0; i < numClusters; i++) {
                String clusterAlias = "remote" + i;
                assertTrue(map.containsKey(clusterAlias));
                SearchShardsResponse shardsResponse = map.get(clusterAlias);
                assertThat(shardsResponse.getNodes(), hasSize(1));
            }
            assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0));
        }
        // Scenario 2: searching a non-existent index — every cluster FAILED, overall
        // failure surfaces as a RemoteTransportException with NOT_FOUND status.
        {
            final CountDownLatch latch = new CountDownLatch(1);
            AtomicReference<Exception> failure = new AtomicReference<>();
            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
            TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), "index_not_found", null, new MatchAllQueryBuilder(), randomBoolean(), null, remoteIndicesByCluster, clusters, timeProvider, service, new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch));
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertEquals(numClusters, clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED));
            assertNotNull(failure.get());
            assertThat(failure.get(), instanceOf(RemoteTransportException.class));
            RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get();
            assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status());
        }
        // Pick a random subset of clusters to disconnect; track both the nodes themselves
        // and their indices so later assertions can map alias -> expected state.
        int numDisconnectedClusters = randomIntBetween(1, numClusters);
        Set<DiscoveryNode> disconnectedNodes = Sets.newHashSetWithExpectedSize(numDisconnectedClusters);
        Set<Integer> disconnectedNodesIndices = Sets.newHashSetWithExpectedSize(numDisconnectedClusters);
        while (disconnectedNodes.size() < numDisconnectedClusters) {
            int i = randomIntBetween(0, numClusters - 1);
            if (disconnectedNodes.add(nodes[i])) {
                assertTrue(disconnectedNodesIndices.add(i));
            }
        }
        // Counts down once per chosen node when its disconnection is actually observed;
        // note the listener removes nodes from `disconnectedNodes` as they disconnect.
        CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters);
        RemoteClusterServiceTests.addConnectionListener(remoteClusterService, new TransportConnectionListener() {

            @Override
            public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connection) {
                if (disconnectedNodes.remove(node)) {
                    disconnectedLatch.countDown();
                }
            }
        });
        for (DiscoveryNode disconnectedNode : disconnectedNodes) {
            service.addFailToSendNoConnectRule(disconnectedNode.getAddress());
        }
        // Scenario 3: disconnected clusters with skip_unavailable=false (the predicate
        // returns false) — the whole request fails with a NodeDisconnectedException cause.
        {
            final CountDownLatch latch = new CountDownLatch(1);
            AtomicReference<Exception> failure = new AtomicReference<>();
            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> false);
            TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, new MatchAllQueryBuilder(), randomBoolean(), null, remoteIndicesByCluster, clusters, timeProvider, service, new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch));
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertEquals(numDisconnectedClusters, clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED));
            assertNotNull(failure.get());
            assertThat(failure.get(), instanceOf(RemoteTransportException.class));
            assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster ["));
            assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class));
        }
        // Mark the disconnected clusters skip_unavailable=true for the next scenario.
        for (int i : disconnectedNodesIndices) {
            RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true);
        }
        // Scenario 4: same disconnections, but now skippable — request succeeds with the
        // unreachable clusters counted as SKIPPED and absent from the response map.
        {
            final CountDownLatch latch = new CountDownLatch(1);
            AtomicReference<Map<String, SearchShardsResponse>> response = new AtomicReference<>();
            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
            TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, new MatchAllQueryBuilder(), randomBoolean(), null, remoteIndicesByCluster, clusters, timeProvider, service, new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(response::set), latch));
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertNotNull(response.get());
            Map<String, SearchShardsResponse> map = response.get();
            assertEquals(numClusters - disconnectedNodesIndices.size(), map.size());
            assertEquals(disconnectedNodesIndices.size(), clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED));
            for (int i = 0; i < numClusters; i++) {
                String clusterAlias = "remote" + i;
                if (disconnectedNodesIndices.contains(i)) {
                    assertFalse(map.containsKey(clusterAlias));
                } else {
                    assertNotNull(map.get(clusterAlias));
                }
            }
        }
        // All chosen disconnections must have been observed by the listener by now.
        assertTrue(disconnectedLatch.await(5, TimeUnit.SECONDS));
        // Scenario 5: remove the fail-to-send rules so connections can be re-established.
        service.clearAllRules();
        if (randomBoolean()) {
            // Randomly re-apply skip_unavailable=true to some clusters; it must not matter
            // once the clusters are reachable again.
            for (int i : disconnectedNodesIndices) {
                if (randomBoolean()) {
                    RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true);
                }
            }
        }
        // Reconnection is asynchronous, hence assertBusy: eventually every cluster responds
        // again and nothing is SKIPPED.
        assertBusy(() -> {
            final CountDownLatch latch = new CountDownLatch(1);
            AtomicReference<Map<String, SearchShardsResponse>> response = new AtomicReference<>();
            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
            TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, new MatchAllQueryBuilder(), randomBoolean(), null, remoteIndicesByCluster, clusters, timeProvider, service, new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(response::set), latch));
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertEquals(0, clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED));
            assertNotNull(response.get());
            Map<String, SearchShardsResponse> map = response.get();
            assertEquals(numClusters, map.size());
            for (int i = 0; i < numClusters; i++) {
                String clusterAlias = "remote" + i;
                assertTrue(map.containsKey(clusterAlias));
                assertNotNull(map.get(clusterAlias));
            }
        });
        // The local service should hold no lingering connections once done.
        assertEquals(0, service.getConnectionManager().size());
    } finally {
        for (MockTransportService mockTransportService : mockTransportServices) {
            mockTransportService.close();
        }
    }
}
242647.9414187elasticsearch
 /**
  * Returns a copy of {@code instance} with exactly one randomly chosen component mutated,
  * for use in equals/hashCode and wire-serialization "differs" testing. Every branch keeps
  * all other constructor arguments identical to the original instance.
  */
 static SnapshotInfo mutateSnapshotInfo(SnapshotInfo instance) {
    return switch (randomIntBetween(0, 10)) {
        case 0 -> {
            // Mutate the snapshot identity: flip either the name or the UUID, and
            // (independently) maybe the repository name as well.
            final String otherName = randomValueOtherThan(instance.snapshotId().getName(), () -> randomAlphaOfLength(5));
            final String otherUuid = randomValueOtherThan(instance.snapshotId().getUUID(), () -> randomAlphaOfLength(5));
            final SnapshotId mutatedId = randomBoolean()
                ? new SnapshotId(instance.snapshotId().getName(), otherUuid)
                : new SnapshotId(otherName, instance.snapshotId().getUUID());
            final String repository = randomBoolean() ? instance.repository() : randomAlphaOfLength(5);
            yield new SnapshotInfo(new Snapshot(repository, mutatedId), instance.indices(), instance.dataStreams(), instance.featureStates(), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        }
        case 1 -> {
            // Mutate the index list: a fresh list whose size differs from the original's.
            final int newIndexCount = randomValueOtherThan(instance.indices().size(), () -> randomIntBetween(1, 10));
            final List<String> newIndices = Arrays.asList(randomArray(newIndexCount, newIndexCount, String[]::new, () -> randomAlphaOfLengthBetween(2, 20)));
            yield new SnapshotInfo(instance.snapshot(), newIndices, instance.dataStreams(), instance.featureStates(), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        }
        // Mutate the start time.
        case 2 -> new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), instance.featureStates(), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), randomValueOtherThan(instance.startTime(), ESTestCase::randomNonNegativeLong), instance.indexSnapshotDetails());
        // Mutate the failure reason.
        case 3 -> new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), instance.featureStates(), randomValueOtherThan(instance.reason(), () -> randomAlphaOfLengthBetween(5, 15)), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        // Mutate the end time.
        case 4 -> new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), instance.featureStates(), instance.reason(), randomValueOtherThan(instance.endTime(), ESTestCase::randomNonNegativeLong), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        case 5 -> {
            // Mutate the shard totals; the failure list is regenerated so it never exceeds
            // the new total shard count.
            final int newTotalShards = randomValueOtherThan(instance.totalShards(), () -> randomIntBetween(0, 100));
            final List<SnapshotShardFailure> newShardFailures = randomShardFailures(randomIntBetween(0, newTotalShards));
            yield new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), instance.featureStates(), instance.reason(), instance.endTime(), newTotalShards, newShardFailures, instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        }
        // Flip includeGlobalState; Boolean.FALSE.equals(..) maps both null and TRUE to
        // false, and FALSE to true, without risking an NPE on a null Boolean.
        case 6 -> new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), instance.featureStates(), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), Boolean.FALSE.equals(instance.includeGlobalState()), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        // Mutate the user metadata map.
        case 7 -> new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), instance.featureStates(), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), randomValueOtherThan(instance.userMetadata(), SnapshotInfoTestUtils::randomUserMetadata), instance.startTime(), instance.indexSnapshotDetails());
        case 8 -> {
            // Mutate the data stream list.
            final List<String> newDataStreams = randomValueOtherThan(instance.dataStreams(), () -> Arrays.asList(randomArray(0, 10, String[]::new, () -> randomAlphaOfLengthBetween(2, 20))));
            yield new SnapshotInfo(instance.snapshot(), instance.indices(), newDataStreams, instance.featureStates(), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        }
        // Mutate the feature states.
        case 9 -> new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), randomValueOtherThan(instance.featureStates(), SnapshotInfoTestUtils::randomSnapshotFeatureInfos), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), instance.indexSnapshotDetails());
        // Mutate the per-index snapshot details.
        case 10 -> new SnapshotInfo(instance.snapshot(), instance.indices(), instance.dataStreams(), instance.featureStates(), instance.reason(), instance.endTime(), instance.totalShards(), instance.shardFailures(), instance.includeGlobalState(), instance.userMetadata(), instance.startTime(), randomValueOtherThan(instance.indexSnapshotDetails(), SnapshotInfoTestUtils::randomIndexSnapshotDetails));
        default -> throw new IllegalArgumentException("invalid randomization case");
    };
}
243546.752211elasticsearch
/**
 * Builds the parameterized test cases for the to-integer conversion: identity on INTEGER,
 * boolean-to-0/1, range-checked conversions from LONG / UNSIGNED_LONG / DOUBLE / DATETIME,
 * string parsing (both integer- and double-shaped keywords), and COUNTER_INTEGER
 * pass-through. Out-of-range or unparseable inputs produce a null result plus warnings.
 */
public static Iterable<Object[]> parameters() {
    String read = "Attribute[channel=0]";
    Function<String, String> evaluatorName = s -> "ToIntegerFrom" + s + "Evaluator[field=" + read + "]";
    // Shared fragments of the warnings emitted when a conversion fails at runtime.
    String evaluationFailed = "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.";
    Function<Object, String> outOfIntegerRange = v -> "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + v + "] out of [integer] range";
    Function<String, String> cannotParseNumber = v -> "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [" + v + "]";
    // Re-labels a batch of numeric test cases as their KEYWORD (string) representations.
    Function<List<TestCaseSupplier.TypedDataSupplier>, List<TestCaseSupplier.TypedDataSupplier>> asStrings = cases -> cases.stream()
        .map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD))
        .toList();
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // INTEGER input is a no-op conversion.
    TestCaseSupplier.forUnaryInt(suppliers, read, DataTypes.INTEGER, i -> i, Integer.MIN_VALUE, Integer.MAX_VALUE, List.of());
    // BOOLEAN: true -> 1, false -> 0.
    TestCaseSupplier.forUnaryBoolean(suppliers, evaluatorName.apply("Boolean"), DataTypes.INTEGER, b -> b ? 1 : 0, List.of());
    // DATETIME: epoch millis that fit in an int convert; larger values warn and yield null.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("Long"), dateCases(0, Integer.MAX_VALUE), DataTypes.INTEGER, l -> ((Long) l).intValue(), List.of());
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("Long"), dateCases(Integer.MAX_VALUE + 1L, Long.MAX_VALUE), DataTypes.INTEGER, l -> null, l -> List.of(evaluationFailed, outOfIntegerRange.apply(l)));
    // Random keywords are not parseable as numbers.
    TestCaseSupplier.forUnaryStrings(suppliers, evaluatorName.apply("String"), DataTypes.INTEGER, bytesRef -> null, bytesRef -> List.of(evaluationFailed, cannotParseNumber.apply(bytesRef.utf8ToString())));
    // DOUBLE: rounded when in int range; out-of-range values warn and yield null.
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.INTEGER, d -> safeToInt(Math.round(d)), Integer.MIN_VALUE, Integer.MAX_VALUE, List.of());
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.INTEGER, d -> null, Double.NEGATIVE_INFINITY, Integer.MIN_VALUE - 1d, d -> List.of(evaluationFailed, outOfIntegerRange.apply(d)));
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.INTEGER, d -> null, Integer.MAX_VALUE + 1d, Double.POSITIVE_INFINITY, d -> List.of(evaluationFailed, outOfIntegerRange.apply(d)));
    // UNSIGNED_LONG: same in-range/out-of-range split.
    TestCaseSupplier.forUnaryUnsignedLong(suppliers, evaluatorName.apply("UnsignedLong"), DataTypes.INTEGER, BigInteger::intValue, BigInteger.ZERO, BigInteger.valueOf(Integer.MAX_VALUE), List.of());
    TestCaseSupplier.forUnaryUnsignedLong(suppliers, evaluatorName.apply("UnsignedLong"), DataTypes.INTEGER, ul -> null, BigInteger.valueOf(Integer.MAX_VALUE).add(BigInteger.ONE), UNSIGNED_LONG_MAX, ul -> List.of(evaluationFailed, outOfIntegerRange.apply(ul)));
    // LONG: same in-range/out-of-range split on both ends.
    TestCaseSupplier.forUnaryLong(suppliers, evaluatorName.apply("Long"), DataTypes.INTEGER, l -> (int) l, Integer.MIN_VALUE, Integer.MAX_VALUE, List.of());
    TestCaseSupplier.forUnaryLong(suppliers, evaluatorName.apply("Long"), DataTypes.INTEGER, l -> null, Long.MIN_VALUE, Integer.MIN_VALUE - 1L, l -> List.of(evaluationFailed, outOfIntegerRange.apply(l)));
    TestCaseSupplier.forUnaryLong(suppliers, evaluatorName.apply("Long"), DataTypes.INTEGER, l -> null, Integer.MAX_VALUE + 1L, Long.MAX_VALUE, l -> List.of(evaluationFailed, outOfIntegerRange.apply(l)));
    // KEYWORD holding an integer literal parses exactly.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), asStrings.apply(TestCaseSupplier.intCases(Integer.MIN_VALUE, Integer.MAX_VALUE, true)), DataTypes.INTEGER, bytesRef -> Integer.valueOf(((BytesRef) bytesRef).utf8ToString()), List.of());
    // KEYWORD holding an in-range double literal is parsed then rounded.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), asStrings.apply(TestCaseSupplier.doubleCases(Integer.MIN_VALUE, Integer.MAX_VALUE, true)), DataTypes.INTEGER, bytesRef -> safeToInt(Math.round(Double.parseDouble(((BytesRef) bytesRef).utf8ToString()))), List.of());
    // KEYWORD holding an out-of-range double literal warns as "cannot parse" and yields null.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), asStrings.apply(TestCaseSupplier.doubleCases(Double.NEGATIVE_INFINITY, Integer.MIN_VALUE - 1d, true)), DataTypes.INTEGER, bytesRef -> null, bytesRef -> List.of(evaluationFailed, cannotParseNumber.apply(((BytesRef) bytesRef).utf8ToString())));
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), asStrings.apply(TestCaseSupplier.doubleCases(Integer.MAX_VALUE + 1d, Double.POSITIVE_INFINITY, true)), DataTypes.INTEGER, bytesRef -> null, bytesRef -> List.of(evaluationFailed, cannotParseNumber.apply(((BytesRef) bytesRef).utf8ToString())));
    // COUNTER_INTEGER passes through unchanged (no dedicated evaluator).
    TestCaseSupplier.unary(suppliers, "Attribute[channel=0]", List.of(new TestCaseSupplier.TypedDataSupplier("counter", ESTestCase::randomInt, EsqlDataTypes.COUNTER_INTEGER)), DataTypes.INTEGER, l -> l, List.of());
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
}
242759.332150elasticsearch
/**
 * Parses the {@code vertices} array of a hop definition, adding one {@code VertexRequest}
 * to {@code currentHop} per vertex object encountered. Each vertex object must name a
 * {@code field} and may optionally carry either an {@code include} array (bare term strings
 * or {term, boost} objects) or an {@code exclude} array (term strings only) — never both —
 * plus numeric {@code size}, {@code min_doc_count} and {@code shard_min_doc_count} settings.
 *
 * @param parser positioned just inside the vertices array (before the first element)
 * @param currentHop the hop that receives the parsed vertex requests
 * @throws IOException on underlying content-parsing errors
 */
private static void parseVertices(XContentParser parser, Hop currentHop) throws IOException {
    XContentParser.Token token;
    // NOTE: fieldName is shared across the whole array AND reused by the nested
    // include-object loop below; it always holds the most recently seen field name.
    String fieldName = null;
    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
        if (token == XContentParser.Token.START_OBJECT) {
            // Per-vertex state, reset for each object in the array.
            String field = null;
            Map<String, TermBoost> includes = null;
            HashSet<String> excludes = null;
            // Defaults applied when the corresponding properties are absent.
            int size = 10;
            int minDocCount = 3;
            int shardMinDocCount = 2;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    // Remember the property name, then advance to its value token.
                    fieldName = parser.currentName();
                    token = parser.nextToken();
                }
                if (token == XContentParser.Token.START_ARRAY) {
                    if (INCLUDE_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                        // include and exclude are mutually exclusive.
                        if (excludes != null) {
                            throw new ElasticsearchParseException("Graph vertices definition cannot contain both " + INCLUDE_FIELD.getPreferredName() + " and " + EXCLUDE_FIELD.getPreferredName() + " clauses", token.name());
                        }
                        includes = new HashMap<>();
                        // Each include entry is either a bare term string (boost 1.0) or an
                        // object with "term" and optional "boost" properties.
                        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            if (token == XContentParser.Token.START_OBJECT) {
                                String includeTerm = null;
                                float boost = 1f;
                                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                                    if (token == XContentParser.Token.FIELD_NAME) {
                                        // Reuses the outer fieldName variable on purpose.
                                        fieldName = parser.currentName();
                                    } else {
                                        if (token == XContentParser.Token.VALUE_STRING) {
                                            if (TERM_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                                                includeTerm = parser.text();
                                            } else {
                                                throw new ElasticsearchParseException("Graph vertices definition " + INCLUDE_FIELD.getPreferredName() + " clause has invalid property:" + fieldName);
                                            }
                                        } else if (token == XContentParser.Token.VALUE_NUMBER) {
                                            if (BOOST_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                                                boost = parser.floatValue();
                                            } else {
                                                throw new ElasticsearchParseException("Graph vertices definition " + INCLUDE_FIELD.getPreferredName() + " clause has invalid property:" + fieldName);
                                            }
                                        } else {
                                            throw new ElasticsearchParseException("Graph vertices definition " + INCLUDE_FIELD.getPreferredName() + " clause has invalid property type:" + token.name());
                                        }
                                    }
                                }
                                // An include object without a "term" property is invalid.
                                if (includeTerm == null) {
                                    throw new ElasticsearchParseException("Graph vertices definition " + INCLUDE_FIELD.getPreferredName() + " clause has missing object property for term");
                                }
                                includes.put(includeTerm, new TermBoost(includeTerm, boost));
                            } else if (token == XContentParser.Token.VALUE_STRING) {
                                // Bare string form: term with the default boost of 1.0.
                                String term = parser.text();
                                includes.put(term, new TermBoost(term, 1f));
                            } else {
                                throw new ElasticsearchParseException("Graph vertices definition " + INCLUDE_FIELD.getPreferredName() + " clauses must be string terms or Objects with terms and boosts, not" + token.name());
                            }
                        }
                    } else if (EXCLUDE_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                        // include and exclude are mutually exclusive.
                        if (includes != null) {
                            throw new ElasticsearchParseException("Graph vertices definition cannot contain both " + INCLUDE_FIELD.getPreferredName() + " and " + EXCLUDE_FIELD.getPreferredName() + " clauses", token.name());
                        }
                        excludes = new HashSet<>();
                        // Exclude entries are plain term strings.
                        while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                            excludes.add(parser.text());
                        }
                    } else {
                        throw new ElasticsearchParseException("Illegal property in graph vertices definition " + fieldName, token.name());
                    }
                }
                // String property: only "field" is recognized.
                if (token == XContentParser.Token.VALUE_STRING) {
                    if (FIELD_NAME_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                        field = parser.text();
                    } else {
                        throw new ElasticsearchParseException("Unknown string property: [" + fieldName + "]");
                    }
                }
                // Numeric properties: size / min_doc_count / shard_min_doc_count.
                if (token == XContentParser.Token.VALUE_NUMBER) {
                    if (SIZE_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                        size = parser.intValue();
                    } else if (MIN_DOC_COUNT_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                        minDocCount = parser.intValue();
                    } else if (SHARD_MIN_DOC_COUNT_FIELD.match(fieldName, parser.getDeprecationHandler())) {
                        shardMinDocCount = parser.intValue();
                    } else {
                        throw new ElasticsearchParseException("Unknown numeric property: [" + fieldName + "]");
                    }
                }
            }
            // A vertex without a field name is meaningless.
            if (field == null) {
                throw new ElasticsearchParseException("Missing field name in graph vertices definition", token.name());
            }
            // Materialize the parsed vertex definition onto the hop.
            VertexRequest vr = currentHop.addVertexRequest(field);
            if (includes != null) {
                for (TermBoost tb : includes.values()) {
                    vr.addInclude(tb.getTerm(), tb.getBoost());
                }
            }
            if (excludes != null) {
                for (String term : excludes) {
                    vr.addExclude(term);
                }
            }
            vr.size(size);
            vr.minDocCount(minDocCount);
            vr.shardMinDocCount(shardMinDocCount);
        }
    }
}
245828.462171elasticsearch
/**
 * Verifies how {@code index.lifecycle.prefer_ilm} and the data stream lifecycle interact when a
 * composable index template configures both an ILM policy and a data stream lifecycle:
 * backing indices that already exist keep their current manager, while indices created by later
 * rollovers pick up whichever management system is preferred at that point, and disabling the
 * data stream lifecycle hands management back to ILM.
 */
public void testUpdateIndexTemplateToDataStreamLifecyclePreference() throws Exception {
    // ILM policy with a single hot phase that rolls over once the write index holds 2 docs.
    RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
    Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
    LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
    PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
    assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
    // Initially the template only names the ILM policy, so backing indices are ILM-managed.
    putComposableIndexTemplate(indexTemplateName, null, List.of(dataStreamName + "*"), Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, null);
    client().execute(CreateDataStreamAction.INSTANCE, new CreateDataStreamAction.Request(dataStreamName)).get();
    indexDocs(dataStreamName, 2);
    // The 2 indexed docs satisfy the ILM rollover condition: expect a second generation.
    assertBusy(() -> assertBackingIndexCount(2));
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        ExplainLifecycleResponse explainResponse = ilmExplain(backingIndices.get(0), backingIndices.get(1));
        // First generation finished the hot phase; the write index waits for the next rollover.
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(0), PhaseCompleteStep.NAME);
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(1), WaitForRolloverReadyStep.NAME);
    });
    // Update the template so new backing indices prefer the data stream lifecycle over ILM.
    DataStreamLifecycle customLifecycle = customEnabledLifecycle();
    putComposableIndexTemplate(indexTemplateName, null, List.of(dataStreamName + "*"), Settings.builder().put(IndexSettings.PREFER_ILM, false).put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, customLifecycle);
    indexDocs(dataStreamName, 2);
    assertBusy(() -> assertBackingIndexCount(3));
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String writeIndex = backingIndices.get(2);
        ExplainLifecycleResponse explainResponse = ilmExplain(backingIndices.get(0), backingIndices.get(1), writeIndex);
        // The two pre-existing generations stay ILM-managed and complete the hot phase.
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(0), PhaseCompleteStep.NAME);
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(1), PhaseCompleteStep.NAME);
        // The data stream itself has no lifecycle configured yet (the explicit
        // PutDataStreamLifecycleAction below is what activates it), so the new write index is
        // not lifecycle-managed either.
        ExplainDataStreamLifecycleAction.Response dataStreamLifecycleExplainResponse = client().execute(ExplainDataStreamLifecycleAction.INSTANCE, new ExplainDataStreamLifecycleAction.Request(new String[] { writeIndex })).actionGet();
        assertThat(dataStreamLifecycleExplainResponse.getIndices().size(), is(1));
        assertThat(dataStreamLifecycleExplainResponse.getIndices().get(0).isManagedByLifecycle(), is(false));
    });
    // Explicitly configure a data stream lifecycle, roll over once more, then replace it with
    // the custom lifecycle's retention so lifecycle-managed indices report customLifecycle.
    client().execute(PutDataStreamLifecycleAction.INSTANCE, new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, new DataStreamLifecycle())).actionGet();
    indexDocs(dataStreamName, 1);
    client().execute(PutDataStreamLifecycleAction.INSTANCE, new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getDataStreamRetention())).actionGet();
    assertBusy(() -> assertBackingIndexCount(4));
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String thirdGenerationIndex = backingIndices.get(2);
        String writeIndex = backingIndices.get(3);
        ExplainLifecycleResponse explainResponse = ilmExplain(backingIndices.get(0), backingIndices.get(1), thirdGenerationIndex, writeIndex);
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(0), PhaseCompleteStep.NAME);
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(1), PhaseCompleteStep.NAME);
        // The newer generations switched over to the data stream lifecycle.
        assertThat(explainResponse.getIndexResponses().get(thirdGenerationIndex).managedByILM(), is(false));
        assertThat(explainResponse.getIndexResponses().get(writeIndex).managedByILM(), is(false));
        ExplainDataStreamLifecycleAction.Response dataStreamLifecycleExplainResponse = client().execute(ExplainDataStreamLifecycleAction.INSTANCE, new ExplainDataStreamLifecycleAction.Request(new String[] { thirdGenerationIndex, writeIndex })).actionGet();
        assertThat(dataStreamLifecycleExplainResponse.getIndices().size(), is(2));
        for (ExplainIndexDataStreamLifecycle index : dataStreamLifecycleExplainResponse.getIndices()) {
            assertThat(index.isManagedByLifecycle(), is(true));
            assertThat(index.getLifecycle(), equalTo(customLifecycle));
        }
    });
    // Disabling the data stream lifecycle hands all backing indices back to ILM.
    client().execute(PutDataStreamLifecycleAction.INSTANCE, new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, TimeValue.timeValueDays(90), false)).actionGet();
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String writeIndex = backingIndices.get(3);
        ExplainLifecycleResponse explainResponse = ilmExplain(backingIndices.get(0), backingIndices.get(1), backingIndices.get(2), writeIndex);
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(0), PhaseCompleteStep.NAME);
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(1), PhaseCompleteStep.NAME);
        assertIlmManagedInHotPhaseAtStep(explainResponse, backingIndices.get(2), PhaseCompleteStep.NAME);
        assertIlmManagedInHotPhaseAtStep(explainResponse, writeIndex, WaitForRolloverReadyStep.NAME);
    });
}

/** Asserts the data stream exists and currently has exactly {@code expectedCount} backing indices. */
private void assertBackingIndexCount(int expectedCount) {
    GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
    GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
    assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
    assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName));
    assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().size(), equalTo(expectedCount));
}

/** Fetches the ILM explain responses for the given concrete backing index names. */
private ExplainLifecycleResponse ilmExplain(String... indices) throws Exception {
    return client().execute(ExplainLifecycleAction.INSTANCE, new ExplainLifecycleRequest().indices(indices)).get();
}

/** Asserts {@code index} is ILM-managed, is in the hot phase, and sits on step {@code expectedStep}. */
private static void assertIlmManagedInHotPhaseAtStep(ExplainLifecycleResponse explainResponse, String index, String expectedStep) {
    IndexLifecycleExplainResponse explain = explainResponse.getIndexResponses().get(index);
    assertThat(explain.managedByILM(), is(true));
    assertThat(explain.getPhase(), is("hot"));
    assertThat(explain.getStep(), is(expectedStep));
}
242783.9327147elasticsearch
/**
 * Assigns (or reassigns) a model alias to a trained model via a master-node cluster state update.
 *
 * Validation performed before submitting the state update:
 * <ul>
 *   <li>if the alias already points at another model, {@code reassign} must be set</li>
 *   <li>the target model must exist, pass the license check, and the alias must not collide
 *       with an existing model id</li>
 *   <li>on reassignment, old and new models must have the same inference config name and the
 *       same model type</li>
 *   <li>for PyTorch models whose old model is deployed, the new model must itself be deployed
 *       and allocated before the alias can move</li>
 *   <li>a warning is audited/logged (not a failure) when the input fields of the two models
 *       differ significantly</li>
 * </ul>
 *
 * @param task     the task this operation runs under (unused here)
 * @param request  the alias assignment request (alias, model id, reassign flag)
 * @param state    the current cluster state, used to resolve the existing alias and assignments
 * @param listener completed with an acknowledgement, or a failure describing the rejected request
 */
protected void masterOperation(Task task, PutTrainedModelAliasAction.Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener) throws Exception {
    final boolean mlSupported = MachineLearningField.ML_API_FEATURE.check(licenseState);
    // Basic-license models remain usable regardless of the current ML license state.
    final Predicate<TrainedModelConfig> isLicensed = (model) -> mlSupported || model.getLicenseLevel() == License.OperationMode.BASIC;
    final String oldModelId = ModelAliasMetadata.fromState(state).getModelId(request.getModelAlias());
    if (oldModelId != null && (request.isReassign() == false)) {
        listener.onFailure(ExceptionsHelper.badRequestException("cannot assign model_alias [{}] to model_id [{}] as model_alias already refers to [{}]. " + "Set parameter [reassign] to [true] if model_alias should be reassigned.", request.getModelAlias(), request.getModelId(), oldModelId));
        return;
    }
    // One lookup fetches: the new model, the currently aliased model (if any), and any model
    // whose id collides with the requested alias.
    Set<String> modelIds = new HashSet<>();
    modelIds.add(request.getModelAlias());
    modelIds.add(request.getModelId());
    if (oldModelId != null) {
        modelIds.add(oldModelId);
    }
    trainedModelProvider.getTrainedModels(modelIds, GetTrainedModelsAction.Includes.empty(), true, null, ActionListener.wrap(models -> {
        TrainedModelConfig newModel = null;
        TrainedModelConfig oldModel = null;
        for (TrainedModelConfig config : models) {
            if (config.getModelId().equals(request.getModelId())) {
                newModel = config;
            }
            if (config.getModelId().equals(oldModelId)) {
                oldModel = config;
            }
            // An alias must never shadow a real model id.
            if (config.getModelId().equals(request.getModelAlias())) {
                listener.onFailure(ExceptionsHelper.badRequestException("model_alias cannot be the same as an existing trained model_id"));
                return;
            }
        }
        if (newModel == null) {
            listener.onFailure(ExceptionsHelper.missingTrainedModel(request.getModelId()));
            return;
        }
        if (isLicensed.test(newModel) == false) {
            listener.onFailure(LicenseUtils.newComplianceException(XPackField.MACHINE_LEARNING));
            return;
        }
        if (oldModel != null) {
            // Reassignment: the new model must be a drop-in replacement for the old one.
            if (newModel.getInferenceConfig() != null && oldModel.getInferenceConfig() != null) {
                if (newModel.getInferenceConfig().getName().equals(oldModel.getInferenceConfig().getName()) == false) {
                    listener.onFailure(ExceptionsHelper.badRequestException("cannot reassign model_alias [{}] to model [{}] " + "with inference config type [{}] from model [{}] with type [{}]", request.getModelAlias(), newModel.getModelId(), newModel.getInferenceConfig().getName(), oldModel.getModelId(), oldModel.getInferenceConfig().getName()));
                    return;
                }
            }
            if (Objects.equals(newModel.getModelType(), oldModel.getModelType()) == false) {
                // A null model type is treated as the legacy default TREE_ENSEMBLE in the message.
                listener.onFailure(ExceptionsHelper.badRequestException("cannot reassign model_alias [{}] to model [{}] with type [{}] from model [{}] with type [{}]", request.getModelAlias(), newModel.getModelId(), Optional.ofNullable(newModel.getModelType()).orElse(TrainedModelType.TREE_ENSEMBLE).toString(), oldModel.getModelId(), Optional.ofNullable(oldModel.getModelType()).orElse(TrainedModelType.TREE_ENSEMBLE).toString()));
                return;
            }
            if (newModel.getModelType() == TrainedModelType.PYTORCH) {
                // PyTorch models serve traffic via deployments: if the old model is deployed
                // and usably allocated, the new model must be deployed and allocated too,
                // otherwise moving the alias would break inference.
                List<TrainedModelAssignment> oldAssignments = TrainedModelAssignmentMetadata.assignmentsForModelId(state, oldModelId);
                List<TrainedModelAssignment> newAssignments = TrainedModelAssignmentMetadata.assignmentsForModelId(state, newModel.getModelId());
                if (oldAssignments.isEmpty() == false) {
                    if (newAssignments.isEmpty()) {
                        listener.onFailure(ExceptionsHelper.badRequestException("cannot reassign model_alias [{}] to model [{}] from model [{}] as it is not yet deployed", request.getModelAlias(), newModel.getModelId(), oldModel.getModelId()));
                        return;
                    } else {
                        for (var oldAssignment : oldAssignments) {
                            Optional<AllocationStatus> oldAllocationStatus = oldAssignment.calculateAllocationStatus();
                            if (oldAllocationStatus.isPresent() && oldAllocationStatus.get().calculateState().isAnyOf(AllocationStatus.State.FULLY_ALLOCATED, AllocationStatus.State.STARTED)) {
                                for (var newAssignment : newAssignments) {
                                    Optional<AllocationStatus> newAllocationStatus = newAssignment.calculateAllocationStatus();
                                    if (newAllocationStatus.isEmpty() || newAllocationStatus.get().calculateState().equals(AllocationStatus.State.STARTING)) {
                                        // Fixed: the message previously rendered a double space
                                        // ("model [{}]  from model") from "..] " + " from..".
                                        listener.onFailure(ExceptionsHelper.badRequestException("cannot reassign model_alias [{}] to model [{}] " + "from model [{}] as it is not yet allocated to any nodes", request.getModelAlias(), newModel.getModelId(), oldModel.getModelId()));
                                        return;
                                    }
                                }
                            }
                        }
                    }
                }
            }
            // Warn (but do not fail) when over half of the old model's input fields are not
            // shared with the new model — predictions would likely differ significantly.
            Set<String> oldInputFields = new HashSet<>(oldModel.getInput().getFieldNames());
            Set<String> newInputFields = new HashSet<>(newModel.getInput().getFieldNames());
            if (Sets.difference(oldInputFields, newInputFields).size() > (oldInputFields.size() / 2) || Sets.intersection(newInputFields, oldInputFields).size() < (oldInputFields.size() / 2)) {
                String warning = Messages.getMessage(TRAINED_MODEL_INPUTS_DIFFER_SIGNIFICANTLY, request.getModelId(), oldModelId);
                auditor.warning(oldModelId, warning);
                logger.warn("[{}] {}", oldModelId, warning);
                HeaderWarning.addWarning(warning);
            }
        }
        // All checks passed: apply the alias change through the master service.
        submitUnbatchedTask("update-model-alias", new AckedClusterStateUpdateTask(request, listener) {

            @Override
            public ClusterState execute(final ClusterState currentState) {
                return updateModelAlias(currentState, request);
            }
        });
    }, listener::onFailure));
}
241602.8843151elasticsearch
/**
 * Returns the next token from the character stream, or {@link Optional#empty()} once the stream
 * is exhausted. A token is a maximal run of one character class (letters, digits, symbols, or
 * whitespace), with special handling for apostrophes so that common English contraction
 * suffixes ('s, 't, 'm, 'd, 're, 've, 'll) stay attached to the token.
 *
 * State lives in instance fields: [offsetStart, offsetEnd) delimits the token being built, the
 * in* booleans record which character class the current token belongs to, inSpacePrefix marks a
 * single leading space absorbed into the token, and putBackChar buffers look-ahead characters
 * to be re-read on the next getNextChar() call.
 */
public Optional<TokenizerUtils.CharSequenceRef> next() {
    int curIntChar;
    // The new token starts where the previous one ended.
    offsetStart = offsetEnd;
    while ((curIntChar = getNextChar()) >= 0) {
        char curChar = (char) curIntChar;
        if (isApostrophe(curChar)) {
            // Inside a symbol run an apostrophe is just another symbol character.
            if (inSymbol) {
                offsetEnd++;
                continue;
            }
            // An apostrophe terminates a letter/digit/whitespace token; re-read it next call.
            if (inAnythingOtherThanSpace()) {
                putBackChar.add(curIntChar);
                return Optional.of(tokenComplete());
            }
            inSymbol = true;
            if (inSpacePrefix) {
                offsetEnd++;
                continue;
            }
            offsetEnd++;
            // Token starts at the apostrophe: look ahead for a contraction suffix.
            int nextIntChar = getNextChar();
            if (nextIntChar < 0) {
                return Optional.of(tokenComplete());
            }
            if (nextIntChar == 's' || nextIntChar == 't' || nextIntChar == 'm' || nextIntChar == 'd') {
                // Single-letter suffix ('s 't 'm 'd): consume it into this token.
                offsetEnd++;
            } else if (nextIntChar == 'r' || nextIntChar == 'v' || nextIntChar == 'l') {
                // Two-letter suffixes ('re 've 'll) need one more char of look-ahead.
                // NOTE(review): this also accepts non-contractions such as 'rl, 'vl, or 'le —
                // presumably an accepted approximation; confirm against the tokenizer's tests.
                int nextNextIntChar = getNextChar();
                if (nextNextIntChar == 'e' || nextNextIntChar == 'l') {
                    offsetEnd++;
                    offsetEnd++;
                } else {
                    // Not a contraction: push both look-ahead chars back in read order.
                    putBackChar.add(nextIntChar);
                    if (nextNextIntChar >= 0) {
                        putBackChar.add(nextNextIntChar);
                    }
                }
            } else {
                putBackChar.add(nextIntChar);
            }
            return Optional.of(tokenComplete());
        }
        // Inside a letter run: extend on letters, otherwise the token is done.
        if (inAnyLetter) {
            if (isLetter(curChar)) {
                offsetEnd++;
                continue;
            }
            putBackChar.add(curIntChar);
            return Optional.of(tokenComplete());
        }
        // A letter either starts a letter run or terminates a non-letter token.
        if (isLetter(curChar)) {
            if (inAnythingOtherThanSpace()) {
                putBackChar.add(curIntChar);
                return Optional.of(tokenComplete());
            }
            inAnyLetter = true;
            offsetEnd++;
            continue;
        }
        // Inside a digit run: extend on digits, otherwise the token is done.
        if (inAnyNumber) {
            if (isDigit(curChar)) {
                offsetEnd++;
                continue;
            }
            putBackChar.add(curIntChar);
            return Optional.of(tokenComplete());
        }
        // A digit either starts a digit run or terminates a non-digit token.
        if (isDigit(curChar)) {
            if (inAnythingOtherThanSpace()) {
                putBackChar.add(curIntChar);
                return Optional.of(tokenComplete());
            }
            inAnyNumber = true;
            offsetEnd++;
            continue;
        }
        // Inside a symbol run: extend on symbols, otherwise the token is done.
        if (inSymbol) {
            if (isSymbol(curChar)) {
                offsetEnd++;
                continue;
            }
            putBackChar.add(curIntChar);
            return Optional.of(tokenComplete());
        }
        // A symbol either starts a symbol run or terminates a non-symbol token.
        if (isSymbol(curChar)) {
            if (inAnythingOtherThanSpace()) {
                putBackChar.add(curIntChar);
                return Optional.of(tokenComplete());
            }
            inSymbol = true;
            offsetEnd++;
            continue;
        }
        // Inside a whitespace run.
        if (inWhiteSpace) {
            // Non-space whitespace (tabs, newlines, ...) always extends the run.
            if (isWhitespace(curChar) && isSpace(curChar) == false) {
                offsetEnd++;
                continue;
            }
            if (isSpace(curChar)) {
                // A plain space is only part of the run if more whitespace follows;
                // otherwise it is pushed back so it can lead the next token.
                int nextInt = getNextChar();
                if (nextInt < 0) {
                    offsetEnd++;
                    return Optional.of(tokenComplete());
                }
                if (isWhitespace(nextInt)) {
                    offsetEnd++;
                    putBackChar.add(nextInt);
                    continue;
                }
                putBackChar.add(curIntChar);
                putBackChar.add(nextInt);
                return Optional.of(tokenComplete());
            }
            putBackChar.add(curIntChar);
            return Optional.of(tokenComplete());
        }
        // Whitespace at the start of a token (or after a single-space prefix).
        if (isWhitespace(curChar)) {
            if (inAnythingOtherThanSpace()) {
                putBackChar.add(curIntChar);
                return Optional.of(tokenComplete());
            }
            // First space of a token is absorbed as a prefix rather than starting a run.
            if (isSpace(curChar) && inSpacePrefix == false) {
                offsetEnd++;
                inSpacePrefix = true;
                continue;
            }
            if (isSpace(curChar)) {
                // Second consecutive space: decide between a whitespace run and token end.
                int nextInt = getNextChar();
                if (nextInt < 0) {
                    offsetEnd++;
                    return Optional.of(tokenComplete());
                }
                if (isWhitespace(nextInt)) {
                    inWhiteSpace = true;
                    offsetEnd++;
                    putBackChar.add(nextInt);
                    continue;
                }
                putBackChar.add(curIntChar);
                putBackChar.add(nextInt);
                return Optional.of(tokenComplete());
            }
            // Non-space whitespace starts a whitespace run.
            inWhiteSpace = true;
            offsetEnd++;
        }
    }
    // End of stream: emit any partially built token before reporting exhaustion.
    if (offsetEnd > offsetStart) {
        return Optional.of(tokenComplete());
    }
    return Optional.empty();
}
243934.4512179elasticsearch
/**
 * Resets the machine learning feature: enables ML "reset mode", stops/cancels all ML work
 * (model assignments, snapshot upgrades, datafeeds, anomaly jobs, data frame analytics), waits
 * for in-flight ML tasks and bulk writes against .ml-* indices, delegates to the default
 * SystemIndexPlugin clean-up, and finally leaves reset mode whether the reset succeeded or not.
 *
 * Note the listener chain {@code pipelineValidation} is built BACK-TO-FRONT with
 * delegateFailureAndWrap: the LAST wrapped step in the source runs FIRST at execution time.
 * Execution order: enable reset mode -> inference-processor check -> remove model assignments
 * -> cancel snapshot upgrades -> stop datafeeds -> kill + close jobs -> stop data frame
 * analytics -> wait for tasks -> clean up system indices -> disable reset mode.
 */
public void cleanUpFeature(ClusterService clusterService, Client unwrappedClient, ActionListener<ResetFeatureStateResponse.ResetFeatureStateStatus> finalListener) {
    // When ML is disabled there is nothing of ours to stop; just do the default clean-up.
    if (this.enabled == false) {
        SystemIndexPlugin.super.cleanUpFeature(clusterService, unwrappedClient, finalListener);
        return;
    }
    logger.info("Starting machine learning feature reset");
    OriginSettingClient client = new OriginSettingClient(unwrappedClient, ML_ORIGIN);
    // Per-component success flags, written concurrently by the steps below.
    final Map<String, Boolean> results = new ConcurrentHashMap<>();
    // Final step: always disable reset mode, then relay the overall success or failure.
    // On success-path failure to disable reset mode, the reset is reported as failed.
    ActionListener<ResetFeatureStateResponse.ResetFeatureStateStatus> unsetResetModeListener = ActionListener.wrap(success -> client.execute(SetResetModeAction.INSTANCE, SetResetModeActionRequest.disabled(true), ActionListener.wrap(resetSuccess -> {
        finalListener.onResponse(success);
        logger.info("Finished machine learning feature reset");
    }, resetFailure -> {
        logger.error("failed to disable reset mode after state otherwise successful machine learning reset", resetFailure);
        finalListener.onFailure(ExceptionsHelper.serverError("failed to disable reset mode after state otherwise successful machine learning reset", resetFailure));
    })), failure -> {
        logger.error("failed to reset machine learning", failure);
        // Reset failed: still try to leave reset mode, but always report the original failure.
        client.execute(SetResetModeAction.INSTANCE, SetResetModeActionRequest.disabled(false), ActionListener.wrap(resetSuccess -> finalListener.onFailure(failure), resetFailure -> {
            logger.error("failed to disable reset mode after state clean up failure", resetFailure);
            finalListener.onFailure(failure);
        }));
    });
    // Chain built back-to-front; comments below give the runtime order of each step.
    // Runs LAST before unsetResetModeListener: all tasks are done — clear the memory tracker
    // cache (best effort) and delete the ML system indices via the default clean-up.
    ActionListener<AcknowledgedResponse> pipelineValidation = unsetResetModeListener.<ListTasksResponse>delegateFailureAndWrap((delegate, listTasksResponse) -> {
        listTasksResponse.rethrowFailures("Waiting for indexing requests for .ml-* indices");
        if (results.values().stream().allMatch(b -> b)) {
            if (memoryTracker.get() != null) {
                memoryTracker.get().awaitAndClear(ActionListener.wrap(cacheCleared -> SystemIndexPlugin.super.cleanUpFeature(clusterService, client, delegate), clearFailed -> {
                    logger.error("failed to clear memory tracker cache via machine learning reset feature API", clearFailed);
                    // Cache clearing is best effort; proceed with index clean-up regardless.
                    SystemIndexPlugin.super.cleanUpFeature(clusterService, client, delegate);
                }));
                return;
            }
            SystemIndexPlugin.super.cleanUpFeature(clusterService, client, delegate);
        } else {
            final List<String> failedComponents = results.entrySet().stream().filter(result -> result.getValue() == false).map(Map.Entry::getKey).toList();
            delegate.onFailure(new RuntimeException("Some machine learning components failed to reset: " + failedComponents));
        }
    }).<StopDataFrameAnalyticsAction.Response>delegateFailureAndWrap((delegate, dataFrameStopResponse) -> {
        // Runs after data frame analytics stop: wait for remaining xpack/ml/* tasks, then for
        // in-flight bulk writes touching .ml-* indices, before the step above fires.
        results.put("data_frame/analytics", dataFrameStopResponse.isStopped());
        if (results.values().stream().allMatch(b -> b)) {
            client.admin().cluster().prepareListTasks().setActions("xpack/ml/*").setWaitForCompletion(true).execute(delegate.delegateFailureAndWrap((l, listMlTasks) -> {
                listMlTasks.rethrowFailures("Waiting for machine learning tasks");
                client.admin().cluster().prepareListTasks().setActions("indices:data/write/bulk").setDetailed(true).setWaitForCompletion(true).setDescriptions("*.ml-*").execute(l);
            }));
        } else {
            final List<String> failedComponents = results.entrySet().stream().filter(result -> result.getValue() == false).map(Map.Entry::getKey).toList();
            delegate.onFailure(new RuntimeException("Some machine learning components failed to reset: " + failedComponents));
        }
    }).<CloseJobAction.Response>delegateFailureAndWrap((delegate, closeJobResponse) -> {
        // Runs after jobs close: stop all data frame analytics, retrying with force=true.
        results.put("anomaly_detectors", closeJobResponse.isClosed());
        if (machineLearningExtension.get().isDataFrameAnalyticsEnabled() == false) {
            delegate.onResponse(new StopDataFrameAnalyticsAction.Response(true));
            return;
        }
        StopDataFrameAnalyticsAction.Request stopDataFramesReq = new StopDataFrameAnalyticsAction.Request("_all").setAllowNoMatch(true);
        client.execute(StopDataFrameAnalyticsAction.INSTANCE, stopDataFramesReq, ActionListener.wrap(delegate::onResponse, failure -> {
            logger.warn("failed stopping data frame analytics jobs for machine learning feature reset. Attempting with force=true", failure);
            client.execute(StopDataFrameAnalyticsAction.INSTANCE, stopDataFramesReq.setForce(true), delegate);
        }));
    }).<StopDatafeedAction.Response>delegateFailureAndWrap((delegate, datafeedResponse) -> {
        // Runs after datafeeds stop: kill native processes, then close all anomaly jobs,
        // retrying the close with force=true on failure.
        results.put("datafeeds", datafeedResponse.isStopped());
        if (machineLearningExtension.get().isAnomalyDetectionEnabled() == false) {
            delegate.onResponse(new CloseJobAction.Response(true));
            return;
        }
        CloseJobAction.Request closeJobsRequest = new CloseJobAction.Request().setAllowNoMatch(true).setJobId("_all");
        client.execute(KillProcessAction.INSTANCE, new KillProcessAction.Request("*"), delegate.delegateFailureAndWrap((l, success) -> client.execute(CloseJobAction.INSTANCE, closeJobsRequest, ActionListener.wrap(l::onResponse, failure -> {
            logger.warn("failed closing anomaly jobs for machine learning feature reset. Attempting with force=true", failure);
            client.execute(CloseJobAction.INSTANCE, closeJobsRequest.setForce(true), l);
        }))));
    }).<CancelJobModelSnapshotUpgradeAction.Response>delegateFailureAndWrap((delegate, cancelUpgradesResponse) -> {
        // Runs after snapshot upgrades are cancelled: stop all datafeeds, retry with force=true.
        if (machineLearningExtension.get().isAnomalyDetectionEnabled() == false) {
            delegate.onResponse(new StopDatafeedAction.Response(true));
            return;
        }
        StopDatafeedAction.Request stopDatafeedsReq = new StopDatafeedAction.Request("_all").setAllowNoMatch(true);
        client.execute(StopDatafeedAction.INSTANCE, stopDatafeedsReq, ActionListener.wrap(delegate::onResponse, failure -> {
            logger.warn("failed stopping datafeeds for machine learning feature reset. Attempting with force=true", failure);
            client.execute(StopDatafeedAction.INSTANCE, stopDatafeedsReq.setForce(true), delegate);
        }));
    }).<AcknowledgedResponse>delegateFailureAndWrap((delegate, acknowledgedResponse) -> {
        // Runs after model assignments are removed: cancel all job model snapshot upgrades.
        if (machineLearningExtension.get().isAnomalyDetectionEnabled() == false) {
            delegate.onResponse(new CancelJobModelSnapshotUpgradeAction.Response(true));
            return;
        }
        CancelJobModelSnapshotUpgradeAction.Request cancelSnapshotUpgradesReq = new CancelJobModelSnapshotUpgradeAction.Request("_all", "_all");
        client.execute(CancelJobModelSnapshotUpgradeAction.INSTANCE, cancelSnapshotUpgradesReq, delegate);
    }).delegateFailureAndWrap((delegate, acknowledgedResponse) -> {
        // Runs FIRST in the chain: remove all trained model assignments (NLP deployments).
        if (trainedModelAllocationClusterServiceSetOnce.get() == null || machineLearningExtension.get().isNlpEnabled() == false) {
            delegate.onResponse(AcknowledgedResponse.TRUE);
            return;
        }
        trainedModelAllocationClusterServiceSetOnce.get().removeAllModelAssignments(delegate);
    });
    // After reset mode is enabled: refuse to reset while ingest pipelines still reference
    // trained models, otherwise kick off the chain above.
    ActionListener<AcknowledgedResponse> afterResetModeSet = ActionListener.wrap(acknowledgedResponse -> {
        int numberInferenceProcessors = countInferenceProcessors(clusterService.state());
        if (numberInferenceProcessors > 0) {
            unsetResetModeListener.onFailure(new RuntimeException("Unable to reset machine learning feature as there are ingest pipelines " + "still referencing trained machine learning models"));
            return;
        }
        pipelineValidation.onResponse(AcknowledgedResponse.of(true));
    }, finalListener::onFailure);
    // Entry point: put the cluster into ML reset mode before touching anything.
    client.execute(SetResetModeAction.INSTANCE, SetResetModeActionRequest.enabled(), afterResetModeSet);
}
243600.91208elasticsearch
/**
 * Feeds twelve synthetic keyword documents through the eclat map phase and checks
 * that pruning by minimum count still continues down the next main branch:
 * the expected frequent item sets, supports and profiling counters are asserted
 * against the reduced/pruned transaction store.
 */
public void testPruneToNextMainBranchAfterMinCountPrune() throws IOException {
    Field field1 = createKeywordFieldTestInstance("keyword1", 0);
    Field field2 = createKeywordFieldTestInstance("keyword2", 1);
    Field field3 = createKeywordFieldTestInstance("keyword3", 2);
    Field field2a = createKeywordFieldTestInstance("keyword2a", 3);
    Field field2b = createKeywordFieldTestInstance("keyword2b", 4);
    Field field2c = createKeywordFieldTestInstance("keyword2c", 5);
    Field field4 = createKeywordFieldTestInstance("keyword4", 6);
    Field field4a = createKeywordFieldTestInstance("keyword4a", 7);
    EclatMapReducer eclat = new EclatMapReducer(getTestName(), 0.1, 2, 10, true);
    HashBasedTransactionStore transactionStore = eclat.mapInit(mockBigArrays());
    // Every document carries the same constant values for field2/field2a/field2b/field2c;
    // each row below supplies the per-document values for field1, field3, field4 and field4a.
    String[][] perDocValues = {
        { "f1-a", "f3-A", "f4-1", "f4a-1" },
        { "f1-a", "f3-B", "f4-2", "f4a-2" },
        { "f1-a", "f3-C", "f4-3", "f4a-3" },
        { "f1-a", "f3-D", "f4-4", "f4a-4" },
        { "f1-a", "f3-E", "f4-5", "f4a-5" },
        { "f1-b", "f3-F", "f4-1", "f4a-1" },
        { "f1-b", "f3-G", "f4-1", "f4a-1" },
        { "f1-c", "f3-H", "f4-6", "f4a-6" },
        { "f1-d", "f3-I", "f4-7", "f4a-7" },
        { "f1-b", "f3-J", "f4-8", "f4a-8" },
        { "f1-f", "f3-K", "f4-3", "f4a-3" },
        { "f1-a", "f3-L", "f4-10", "f4a-10" } };
    for (String[] row : perDocValues) {
        eclat.map(
            mockOneDocument(
                List.of(
                    tuple(field1, row[0]),
                    tuple(field2, "f2-1"),
                    tuple(field3, row[1]),
                    tuple(field2a, "f2a-1"),
                    tuple(field2b, "f2b-1"),
                    tuple(field2c, "f2c-1"),
                    tuple(field4, row[2]),
                    tuple(field4a, row[3]))),
            transactionStore);
    }
    EclatMapReducer.EclatResult result = runEclat(eclat, List.of(field1, field2, field3, field2a, field2b, field2c, field4, field4a), transactionStore);
    // Top item sets and their supports.
    assertThat(result.getFrequentItemSets().length, equalTo(6));
    assertThat(result.getFrequentItemSets()[0].getSupport(), equalTo(1.0));
    assertThat(result.getFrequentItemSets()[1].getSupport(), equalTo(0.5));
    assertThat(result.getFrequentItemSets()[2].getSupport(), equalTo(0.25));
    // Profiling counters recorded during reduce/prune.
    assertThat(result.getProfilingInfo().get("unique_items_after_reduce"), equalTo(39L));
    assertThat(result.getProfilingInfo().get("unique_items_after_prune"), equalTo(10L));
    assertThat(result.getProfilingInfo().get("total_transactions_after_reduce"), equalTo(12L));
    assertThat(result.getProfilingInfo().get("total_items_after_reduce"), equalTo(96L));
    assertThat(result.getProfilingInfo().get("total_items_after_prune"), equalTo(96L));
    // The exact number of eclat iterations may vary slightly; pin a narrow window.
    assertThat((Long) result.getProfilingInfo().get("item_sets_checked_eclat"), greaterThanOrEqualTo(294L));
    assertThat((Long) result.getProfilingInfo().get("item_sets_checked_eclat"), lessThan(310L));
}
245164.275182elasticsearch
/**
 * Verifies that {@code SecuritySearchOperationListener#validateReaderContext} grants or
 * denies access to a scroll reader context by comparing the authentication stored in the
 * context with the authentication currently on the thread context. The first four
 * scenarios are expected to pass silently (no audit interaction); the last two must
 * throw {@link SearchContextMissingException} and record an access-denied audit event.
 */
public void testValidateResourceAccessCheck() throws Exception {
    final ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class);
    when(shardSearchRequest.scroll()).thenReturn(new Scroll(TimeValue.timeValueMinutes(between(1, 10))));
    final ShardSearchContextId shardSearchContextId = new ShardSearchContextId(UUIDs.randomBase64UUID(), randomLong());
    try (LegacyReaderContext readerContext = new LegacyReaderContext(shardSearchContextId, indexService, shard, shard.acquireSearcherSupplier(), shardSearchRequest, Long.MAX_VALUE)) {
        readerContext.putInContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, mock(IndicesAccessControl.class));
        final MockLicenseState licenseState = mock(MockLicenseState.class);
        when(licenseState.isAllowed(Security.AUDITING_FEATURE)).thenReturn(true);
        final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, new ThreadContext(Settings.EMPTY));
        final AuditTrail auditTrail = mock(AuditTrail.class);
        final AuditTrailService auditTrailService = new AuditTrailService(auditTrail, licenseState);
        final SecuritySearchOperationListener listener = new SecuritySearchOperationListener(securityContext, auditTrailService);
        final TransportRequest request = mock(TransportRequest.class);
        // Scenario 1: stored and current authentications are the same direct user/realm
        // -> validation passes and nothing is audited.
        try (ThreadContext.StoredContext ignore = securityContext.getThreadContext().stashContext()) {
            readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build(false));
            AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new Authentication.RealmRef("realm", "file", "node")).build(false).writeToContext(securityContext.getThreadContext());
            listener.validateReaderContext(readerContext, request);
            verifyNoMoreInteractions(auditTrail);
        }
        // Scenario 2: stored direct authentication; current authentication runs-as the
        // same effective user/realm -> still passes.
        try (ThreadContext.StoredContext ignore = securityContext.getThreadContext().stashContext()) {
            readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build(false));
            AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLength(5), randomAlphaOfLength(5))).runAs().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build().writeToContext(securityContext.getThreadContext());
            listener.validateReaderContext(readerContext, request);
            verifyNoMoreInteractions(auditTrail);
        }
        // Scenario 3: stored run-as authentication ("realm2"), current direct
        // authentication ("realm") -- realm names differ but both are "file" type
        // and the effective user matches -> passes.
        try (ThreadContext.StoredContext ignore = securityContext.getThreadContext().stashContext()) {
            readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLength(5), "node")).runAs().user(new User("test", "role")).realmRef(new RealmRef("realm2", "file", "node")).build());
            AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new Authentication.RealmRef("realm", "file", "node")).build(false).writeToContext(securityContext.getThreadContext());
            listener.validateReaderContext(readerContext, request);
            verifyNoMoreInteractions(auditTrail);
        }
        // Scenario 4: both stored and current are run-as with differing effective realm
        // names ("realm" vs "realm2") of the same "file" type -> passes.
        try (ThreadContext.StoredContext ignore = securityContext.getThreadContext().stashContext()) {
            readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLength(5), "node")).runAs().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build());
            AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLength(5), "node")).runAs().user(new User("test", "role")).realmRef(new RealmRef("realm2", "file", "node")).build().writeToContext(securityContext.getThreadContext());
            listener.validateReaderContext(readerContext, request);
            verifyNoMoreInteractions(auditTrail);
        }
        // Scenario 5: current authentication has a DIFFERENT effective realm type
        // -> SearchContextMissingException and an accessDenied audit record.
        try (ThreadContext.StoredContext ignore = securityContext.getThreadContext().stashContext()) {
            if (randomBoolean()) {
                readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLength(5), "node")).runAs().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build());
            } else {
                readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build(false));
            }
            final String differentRealmType = randomAlphaOfLength(5);
            Authentication currentAuthn = randomBoolean() ? AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new Authentication.RealmRef("realm", differentRealmType, "node")).build(false) : AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLength(5), "node")).runAs().user(new User("test", "role")).realmRef(new RealmRef("realm", differentRealmType, "node")).build();
            currentAuthn.writeToContext(securityContext.getThreadContext());
            SearchContextMissingException e = expectThrows(SearchContextMissingException.class, () -> listener.validateReaderContext(readerContext, request));
            assertThat(e.contextId(), is(shardSearchContextId));
            verify(auditTrail).accessDenied(isNull(), eq(currentAuthn), isNull(), eq(request), isNull());
        }
        // Scenario 6: current authentication has a DIFFERENT effective username
        // (same realm) -> also denied and audited.
        try (ThreadContext.StoredContext ignore = securityContext.getThreadContext().stashContext()) {
            if (randomBoolean()) {
                readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), randomAlphaOfLength(5), "node")).runAs().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build());
            } else {
                readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build(false));
            }
            Authentication currentAuthn = randomBoolean() ? AuthenticationTestHelper.builder().user(new User(randomAlphaOfLength(5), "role")).realmRef(new Authentication.RealmRef("realm", "file", "node")).build(false) : AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), "file", "node")).runAs().user(new User(randomAlphaOfLength(5), "role")).realmRef(new RealmRef("realm", "file", "node")).build();
            currentAuthn.writeToContext(securityContext.getThreadContext());
            SearchContextMissingException e = expectThrows(SearchContextMissingException.class, () -> listener.validateReaderContext(readerContext, request));
            assertThat(e.contextId(), is(shardSearchContextId));
            verify(auditTrail).accessDenied(isNull(), eq(currentAuthn), isNull(), eq(request), isNull());
        }
    }
}
243626.5636130elasticsearch
/**
 * Builds a randomized {@link SlackMessage.Template}, renders it through a mock
 * template engine, and asserts that every rendered field equals the template's
 * value when one was set, falling back to the configured defaults otherwise.
 */
public void testTemplateRender() throws Exception {
    Settings settings = SlackMessageDefaultsTests.randomSettings();
    SlackMessageDefaults defaults = new SlackMessageDefaults(settings);
    SlackMessage.Template.Builder templateBuilder = SlackMessage.Template.builder();
    if (randomBoolean()) {
        templateBuilder.setFrom(randomAlphaOfLength(10));
    }
    if (randomBoolean()) {
        int count = randomIntBetween(0, 3);
        for (int i = 0; i < count; i++) {
            templateBuilder.addTo(randomAlphaOfLength(10));
        }
    }
    if (randomBoolean()) {
        templateBuilder.setIcon(randomAlphaOfLength(10));
    }
    if (randomBoolean()) {
        templateBuilder.setText(randomAlphaOfLength(10));
    }
    // A message without text must carry at least one attachment.
    if (templateBuilder.text == null || randomBoolean()) {
        int minimumAttachments = templateBuilder.text == null ? 1 : 0;
        int count = randomIntBetween(minimumAttachments, 3);
        for (int i = 0; i < count; i++) {
            Attachment.Template.Builder attachmentBuilder = createRandomAttachmentTemplateBuilder();
            templateBuilder.addAttachments(attachmentBuilder);
        }
    }
    TextTemplateEngine engine = new MockTextTemplateEngine();
    SlackMessage.Template template = templateBuilder.build();
    SlackMessage message = template.render("_w1", "_a1", engine, Collections.emptyMap(), defaults);
    assertThat(message, notNullValue());
    if (template.from != null) {
        assertThat(message.from, is(template.from.getTemplate()));
    } else {
        // With neither template nor default "from", the watch id "_w1" is used.
        assertThat(message.from, is(defaults.from != null ? defaults.from : "_w1"));
    }
    if (template.to == null) {
        assertThat(message.to, is(defaults.to));
    } else {
        String[] expected = new String[message.to.length];
        for (int i = 0; i < expected.length; i++) {
            expected[i] = template.to[i].getTemplate();
        }
        assertThat(message.to, arrayContaining(expected));
    }
    assertThat(message.icon, is(template.icon != null ? template.icon.getTemplate() : defaults.icon));
    assertThat(message.text, is(template.text != null ? template.text.getTemplate() : defaults.text));
    if (template.attachments == null) {
        assertThat(message.attachments, nullValue());
    } else {
        for (int i = 0; i < template.attachments.length; i++) {
            Attachment.Template attachmentTemplate = template.attachments[i];
            Attachment attachment = message.attachments[i];
            assertThat(attachment.authorName, is(attachmentTemplate.authorName != null ? attachmentTemplate.authorName.getTemplate() : defaults.attachment.authorName));
            assertThat(attachment.authorLink, is(attachmentTemplate.authorLink != null ? attachmentTemplate.authorLink.getTemplate() : defaults.attachment.authorLink));
            assertThat(attachment.authorIcon, is(attachmentTemplate.authorIcon != null ? attachmentTemplate.authorIcon.getTemplate() : defaults.attachment.authorIcon));
            assertThat(attachment.color, is(attachmentTemplate.color != null ? attachmentTemplate.color.getTemplate() : defaults.attachment.color));
            assertThat(attachment.fallback, is(attachmentTemplate.fallback != null ? attachmentTemplate.fallback.getTemplate() : defaults.attachment.fallback));
            assertThat(attachment.imageUrl, is(attachmentTemplate.imageUrl != null ? attachmentTemplate.imageUrl.getTemplate() : defaults.attachment.imageUrl));
            assertThat(attachment.pretext, is(attachmentTemplate.pretext != null ? attachmentTemplate.pretext.getTemplate() : defaults.attachment.pretext));
            assertThat(attachment.thumbUrl, is(attachmentTemplate.thumbUrl != null ? attachmentTemplate.thumbUrl.getTemplate() : defaults.attachment.thumbUrl));
            assertThat(attachment.title, is(attachmentTemplate.title != null ? attachmentTemplate.title.getTemplate() : defaults.attachment.title));
            assertThat(attachment.titleLink, is(attachmentTemplate.titleLink != null ? attachmentTemplate.titleLink.getTemplate() : defaults.attachment.titleLink));
            assertThat(attachment.text, is(attachmentTemplate.text != null ? attachmentTemplate.text.getTemplate() : defaults.attachment.text));
            if (attachmentTemplate.fields == null) {
                assertThat(attachment.fields, nullValue());
            } else {
                for (int j = 0; j < attachmentTemplate.fields.length; j++) {
                    Field.Template fieldTemplate = attachmentTemplate.fields[j];
                    Field field = attachment.fields[j];
                    assertThat(field.title, is(fieldTemplate.title != null ? fieldTemplate.title.getTemplate() : defaults.attachment.field.title));
                    assertThat(field.value, is(fieldTemplate.value != null ? fieldTemplate.value.getTemplate() : defaults.attachment.field.value));
                    assertThat(field.isShort, is(fieldTemplate.isShort != null ? fieldTemplate.isShort : defaults.attachment.field.isShort));
                }
            }
            if (attachmentTemplate.markdownSupportedFields == null) {
                assertThat(attachment.markdownSupportedFields, nullValue());
            } else if (attachmentTemplate.markdownSupportedFields.length > 0) {
                // The whole rendered array is compared at once; the original code
                // repeated this identical assertion once per element (the loop index
                // was never used). The length guard keeps the original behavior of
                // asserting nothing for an empty template array.
                String[] templateMarkdownSupportFields = Arrays.stream(attachmentTemplate.markdownSupportedFields).map(TextTemplate::getTemplate).toArray(String[]::new);
                assertThat(attachment.markdownSupportedFields, arrayContainingInAnyOrder(templateMarkdownSupportFields));
            }
        }
    }
}
242002.4855121gwt
/**
 * Translates one Rhino parse-tree {@link Node} into the corresponding GWT
 * {@code JsNode} AST node, dispatching on the node's token type. Returns
 * {@code null} for {@code VOID} nodes; throws {@link JsParserException}
 * for any token type not handled below.
 */
private JsNode map(Node node) throws JsParserException {
    switch(node.getType()) {
        // A whole script becomes a block containing its mapped statements.
        case TokenStream.SCRIPT:
            {
                JsBlock block = new JsBlock(makeSourceInfo(node));
                mapStatements(block.getStatements(), node);
                return block;
            }
        case TokenStream.DEBUGGER:
            return mapDebuggerStatement(node);
        // VOID nodes produce no output.
        case TokenStream.VOID:
            return null;
        case TokenStream.EXPRSTMT:
            return mapExprStmt(node);
        case TokenStream.REGEXP:
            return mapRegExp(node);
        // Binary arithmetic / logical / bitwise operators.
        case TokenStream.ADD:
            return mapBinaryOperation(JsBinaryOperator.ADD, node);
        case TokenStream.SUB:
            return mapBinaryOperation(JsBinaryOperator.SUB, node);
        case TokenStream.MUL:
            return mapBinaryOperation(JsBinaryOperator.MUL, node);
        case TokenStream.DIV:
            return mapBinaryOperation(JsBinaryOperator.DIV, node);
        case TokenStream.MOD:
            return mapBinaryOperation(JsBinaryOperator.MOD, node);
        case TokenStream.AND:
            return mapBinaryOperation(JsBinaryOperator.AND, node);
        case TokenStream.OR:
            return mapBinaryOperation(JsBinaryOperator.OR, node);
        case TokenStream.BITAND:
            return mapBinaryOperation(JsBinaryOperator.BIT_AND, node);
        case TokenStream.BITOR:
            return mapBinaryOperation(JsBinaryOperator.BIT_OR, node);
        case TokenStream.BITXOR:
            return mapBinaryOperation(JsBinaryOperator.BIT_XOR, node);
        // Operator families that Rhino collapses into a single token with a
        // sub-kind ("variant") are dispatched to dedicated helpers.
        case TokenStream.ASSIGN:
            return mapAssignmentVariant(node);
        case TokenStream.RELOP:
            return mapRelationalVariant(node);
        case TokenStream.EQOP:
            return mapEqualityVariant(node);
        case TokenStream.SHOP:
            return mapShiftVariant(node);
        case TokenStream.UNARYOP:
            return mapUnaryVariant(node);
        case TokenStream.INC:
            return mapIncDecFixity(JsUnaryOperator.INC, node);
        case TokenStream.DEC:
            return mapIncDecFixity(JsUnaryOperator.DEC, node);
        // HOOK is the conditional (ternary) operator.
        case TokenStream.HOOK:
            return mapConditional(node);
        case TokenStream.STRING:
            {
                // String literals get a distinct SourceInfo tagged with a
                // string-literal correlation for later analysis.
                SourceInfo info = makeSourceInfoDistinct(node);
                info.addCorrelation(info.getCorrelator().by(Literal.STRING));
                return new JsStringLiteral(info, node.getString());
            }
        case TokenStream.NUMBER:
            return mapNumber(node);
        // Calls and property / element accesses.
        case TokenStream.CALL:
            return mapCall(node);
        case TokenStream.GETPROP:
            return mapGetProp(node);
        case TokenStream.SETPROP:
            return mapSetProp(node);
        case TokenStream.DELPROP:
            return mapDeleteProp(node);
        // Statements and control flow.
        case TokenStream.IF:
            return mapIfStatement(node);
        case TokenStream.WHILE:
            return mapDoOrWhileStatement(true, node);
        case TokenStream.DO:
            return mapDoOrWhileStatement(false, node);
        case TokenStream.FOR:
            return mapForStatement(node);
        case TokenStream.WITH:
            return mapWithStatement(node);
        case TokenStream.GETELEM:
            return mapGetElem(node);
        case TokenStream.SETELEM:
            return mapSetElem(node);
        case TokenStream.FUNCTION:
            return mapFunction(node);
        case TokenStream.BLOCK:
            return mapBlock(node);
        // SETNAME is a plain assignment to a name.
        case TokenStream.SETNAME:
            return mapBinaryOperation(JsBinaryOperator.ASG, node);
        case TokenStream.NAME:
        case TokenStream.BINDNAME:
            return mapName(node);
        case TokenStream.RETURN:
            return mapReturn(node);
        case TokenStream.BREAK:
            return mapBreak(node);
        case TokenStream.CONTINUE:
            return mapContinue(node);
        // Literals and declarations.
        case TokenStream.OBJLIT:
            return mapObjectLiteral(node);
        case TokenStream.ARRAYLIT:
            return mapArrayLit(node);
        case TokenStream.VAR:
            return mapVar(node);
        case TokenStream.PRIMARY:
            return mapPrimary(node);
        case TokenStream.COMMA:
            return mapBinaryOperation(JsBinaryOperator.COMMA, node);
        case TokenStream.NEW:
            return mapNew(node);
        case TokenStream.THROW:
            return mapThrowStatement(node);
        case TokenStream.TRY:
            return mapTryStatement(node);
        case TokenStream.SWITCH:
            return mapSwitchStatement(node);
        case TokenStream.LABEL:
            return mapLabel(node);
        default:
            // Unknown/unsupported token: surface it as a parser error.
            int tokenType = node.getType();
            throw createParserException("Unexpected top-level token type: " + tokenType, node);
    }
}
245211.8720144hadoop
/**
 * Translates a single HDFS edit-log operation into a batch of inotify events
 * sharing the op's transaction id. Returns {@code null} for op codes that do
 * not produce any client-visible event.
 */
public static EventBatch translate(FSEditLogOp op) {
    switch(op.opCode) {
        case OP_ADD:
            FSEditLogOp.AddOp addOp = (FSEditLogOp.AddOp) op;
            // An ADD with no blocks is a brand-new file; with blocks it is a
            // reopen-for-append of an existing file.
            if (addOp.blocks.length == 0) {
                return new EventBatch(op.txid, new Event[] { new Event.CreateEvent.Builder().path(addOp.path).ctime(addOp.atime).replication(addOp.replication).ownerName(addOp.permissions.getUserName()).groupName(addOp.permissions.getGroupName()).perms(addOp.permissions.getPermission()).overwrite(addOp.overwrite).defaultBlockSize(addOp.blockSize).erasureCoded(addOp.erasureCodingPolicyId != ErasureCodeConstants.REPLICATION_POLICY_ID).iNodeType(Event.CreateEvent.INodeType.FILE).build() });
            } else {
                return new EventBatch(op.txid, new Event[] { new Event.AppendEvent.Builder().path(addOp.path).build() });
            }
        case OP_CLOSE:
            FSEditLogOp.CloseOp cOp = (FSEditLogOp.CloseOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.CloseEvent(cOp.path, getSize(cOp), cOp.mtime) });
        case OP_APPEND:
            FSEditLogOp.AppendOp appendOp = (FSEditLogOp.AppendOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.AppendEvent.Builder().path(appendOp.path).newBlock(appendOp.newBlock).build() });
        case OP_SET_REPLICATION:
            FSEditLogOp.SetReplicationOp setRepOp = (FSEditLogOp.SetReplicationOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder().metadataType(Event.MetadataUpdateEvent.MetadataType.REPLICATION).path(setRepOp.path).replication(setRepOp.replication).build() });
        case OP_CONCAT_DELETE:
            // Concat is reported as: append to the target, unlink each source,
            // then close the target (size unknown here, hence -1).
            FSEditLogOp.ConcatDeleteOp cdOp = (FSEditLogOp.ConcatDeleteOp) op;
            List<Event> events = Lists.newArrayList();
            events.add(new Event.AppendEvent.Builder().path(cdOp.trg).build());
            for (String src : cdOp.srcs) {
                events.add(new Event.UnlinkEvent.Builder().path(src).timestamp(cdOp.timestamp).build());
            }
            events.add(new Event.CloseEvent(cdOp.trg, -1, cdOp.timestamp));
            return new EventBatch(op.txid, events.toArray(new Event[0]));
        case OP_RENAME_OLD:
            FSEditLogOp.RenameOldOp rnOpOld = (FSEditLogOp.RenameOldOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.RenameEvent.Builder().srcPath(rnOpOld.src).dstPath(rnOpOld.dst).timestamp(rnOpOld.timestamp).build() });
        case OP_RENAME:
            FSEditLogOp.RenameOp rnOp = (FSEditLogOp.RenameOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.RenameEvent.Builder().srcPath(rnOp.src).dstPath(rnOp.dst).timestamp(rnOp.timestamp).build() });
        case OP_DELETE:
            FSEditLogOp.DeleteOp delOp = (FSEditLogOp.DeleteOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.UnlinkEvent.Builder().path(delOp.path).timestamp(delOp.timestamp).build() });
        case OP_MKDIR:
            FSEditLogOp.MkdirOp mkOp = (FSEditLogOp.MkdirOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.CreateEvent.Builder().path(mkOp.path).ctime(mkOp.timestamp).ownerName(mkOp.permissions.getUserName()).groupName(mkOp.permissions.getGroupName()).perms(mkOp.permissions.getPermission()).iNodeType(Event.CreateEvent.INodeType.DIRECTORY).build() });
        case OP_SET_PERMISSIONS:
            FSEditLogOp.SetPermissionsOp permOp = (FSEditLogOp.SetPermissionsOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder().metadataType(Event.MetadataUpdateEvent.MetadataType.PERMS).path(permOp.src).perms(permOp.permissions).build() });
        case OP_SET_OWNER:
            FSEditLogOp.SetOwnerOp ownOp = (FSEditLogOp.SetOwnerOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder().metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER).path(ownOp.src).ownerName(ownOp.username).groupName(ownOp.groupname).build() });
        case OP_TIMES:
            FSEditLogOp.TimesOp timesOp = (FSEditLogOp.TimesOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder().metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES).path(timesOp.path).atime(timesOp.atime).mtime(timesOp.mtime).build() });
        case OP_SYMLINK:
            FSEditLogOp.SymlinkOp symOp = (FSEditLogOp.SymlinkOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.CreateEvent.Builder().path(symOp.path).ctime(symOp.atime).ownerName(symOp.permissionStatus.getUserName()).groupName(symOp.permissionStatus.getGroupName()).perms(symOp.permissionStatus.getPermission()).symlinkTarget(symOp.value).iNodeType(Event.CreateEvent.INodeType.SYMLINK).build() });
        case OP_REMOVE_XATTR:
            FSEditLogOp.RemoveXAttrOp rxOp = (FSEditLogOp.RemoveXAttrOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder().metadataType(Event.MetadataUpdateEvent.MetadataType.XATTRS).path(rxOp.src).xAttrs(rxOp.xAttrs).xAttrsRemoved(true).build() });
        case OP_SET_XATTR:
            FSEditLogOp.SetXAttrOp sxOp = (FSEditLogOp.SetXAttrOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder().metadataType(Event.MetadataUpdateEvent.MetadataType.XATTRS).path(sxOp.src).xAttrs(sxOp.xAttrs).xAttrsRemoved(false).build() });
        case OP_SET_ACL:
            FSEditLogOp.SetAclOp saOp = (FSEditLogOp.SetAclOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder().metadataType(Event.MetadataUpdateEvent.MetadataType.ACLS).path(saOp.src).acls(saOp.aclEntries).build() });
        case OP_TRUNCATE:
            FSEditLogOp.TruncateOp tOp = (FSEditLogOp.TruncateOp) op;
            return new EventBatch(op.txid, new Event[] { new Event.TruncateEvent(tOp.src, tOp.newLength, tOp.timestamp) });
        default:
            // Op codes with no inotify representation are silently dropped.
            return null;
    }
}
243191.1138121hadoop
/**
 * Parses the fsck command-line flags into an HTTP query string, sends it to the
 * active namenode, streams the report to {@code out}, and converts the report's
 * final status line into an exit code.
 *
 * @param args the fsck command-line arguments (flags plus at most one path)
 * @return 0 on healthy/benign results, 1 for corrupt, 2-6 for the various
 *         decommission/maintenance/stale states, -1 on usage errors or when no
 *         recognized status line was produced
 * @throws IOException on connection or read failures
 */
private int doWork(final String[] args) throws IOException {
    final StringBuilder url = new StringBuilder();
    url.append("/fsck?ugi=").append(ugi.getShortUserName());
    String dir = null;
    boolean doListCorruptFileBlocks = false;
    for (int idx = 0; idx < args.length; idx++) {
        if (args[idx].equals("-move")) {
            url.append("&move=1");
        } else if (args[idx].equals("-delete")) {
            url.append("&delete=1");
        } else if (args[idx].equals("-files")) {
            url.append("&files=1");
        } else if (args[idx].equals("-openforwrite")) {
            url.append("&openforwrite=1");
        } else if (args[idx].equals("-blocks")) {
            url.append("&blocks=1");
        } else if (args[idx].equals("-locations")) {
            url.append("&locations=1");
        } else if (args[idx].equals("-racks")) {
            url.append("&racks=1");
        } else if (args[idx].equals("-replicaDetails")) {
            url.append("&replicadetails=1");
        } else if (args[idx].equals("-upgradedomains")) {
            url.append("&upgradedomains=1");
        } else if (args[idx].equals("-storagepolicies")) {
            url.append("&storagepolicies=1");
        } else if (args[idx].equals("-showprogress")) {
            url.append("&showprogress=1");
        } else if (args[idx].equals("-list-corruptfileblocks")) {
            url.append("&listcorruptfileblocks=1");
            doListCorruptFileBlocks = true;
        } else if (args[idx].equals("-includeSnapshots")) {
            url.append("&includeSnapshots=1");
        } else if (args[idx].equals("-maintenance")) {
            url.append("&maintenance=1");
        } else if (args[idx].equals("-blockId")) {
            // Consume every following non-flag token as a space-separated
            // block id list.
            StringBuilder sb = new StringBuilder();
            idx++;
            while (idx < args.length && !args[idx].startsWith("-")) {
                sb.append(args[idx]);
                sb.append(" ");
                idx++;
            }
            // Step back one position: the while loop stopped on the next
            // "-"-prefixed option (or past the end), and the for-loop's
            // increment would otherwise skip that option entirely.
            idx--;
            url.append("&blockId=").append(URLEncoder.encode(sb.toString(), "UTF-8"));
        } else if (args[idx].equals("-replicate")) {
            url.append("&replicate=1");
        } else if (!args[idx].startsWith("-")) {
            // A bare token is the target path; only one is allowed.
            if (null == dir) {
                dir = args[idx];
            } else {
                System.err.println("fsck: can only operate on one path at a time '" + args[idx] + "'");
                printUsage(System.err);
                return -1;
            }
        } else {
            System.err.println("fsck: Illegal option '" + args[idx] + "'");
            printUsage(System.err);
            return -1;
        }
    }
    if (null == dir) {
        dir = "/";
    }
    Path dirpath = null;
    URI namenodeAddress = null;
    try {
        dirpath = getResolvedPath(dir);
        namenodeAddress = getCurrentNamenodeAddress(dirpath);
    } catch (IOException ioe) {
        System.err.println("FileSystem is inaccessible due to:\n" + ioe.toString());
    }
    if (namenodeAddress == null) {
        System.err.println("DFSck exiting.");
        return 0;
    }
    url.insert(0, namenodeAddress.toString());
    url.append("&path=").append(URLEncoder.encode(Path.getPathWithoutSchemeAndAuthority(dirpath).toString(), "UTF-8"));
    System.err.println("Connecting to namenode via " + url.toString());
    if (doListCorruptFileBlocks) {
        return listCorruptFileBlocks(dir, url.toString());
    }
    URL path = new URL(url.toString());
    URLConnection connection;
    try {
        connection = connectionFactory.openConnection(path, isSpnegoEnabled);
    } catch (AuthenticationException e) {
        throw new IOException(e);
    }
    String line = null;
    // Default to CORRUPT so an empty response maps to errCode 1.
    String lastLine = NamenodeFsck.CORRUPT_STATUS;
    int errCode = -1;
    // try-with-resources closes the reader (and underlying stream) even if
    // reading fails part-way through.
    try (BufferedReader input = new BufferedReader(
            new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        while ((line = input.readLine()) != null) {
            out.println(line);
            lastLine = line;
        }
    }
    // Map the report's final status line to the documented exit codes.
    if (lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS)) {
        errCode = 0;
    } else if (lastLine.endsWith(NamenodeFsck.CORRUPT_STATUS)) {
        errCode = 1;
    } else if (lastLine.endsWith(NamenodeFsck.NONEXISTENT_STATUS)) {
        errCode = 0;
    } else if (lastLine.contains("Incorrect blockId format:")) {
        errCode = 0;
    } else if (lastLine.endsWith(NamenodeFsck.EXCESS_STATUS)) {
        errCode = 0;
    } else if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONED_STATUS)) {
        errCode = 2;
    } else if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONING_STATUS)) {
        errCode = 3;
    } else if (lastLine.endsWith(NamenodeFsck.IN_MAINTENANCE_STATUS)) {
        errCode = 4;
    } else if (lastLine.endsWith(NamenodeFsck.ENTERING_MAINTENANCE_STATUS)) {
        errCode = 5;
    } else if (lastLine.endsWith(NamenodeFsck.STALE_STATUS)) {
        errCode = 6;
    }
    return errCode;
}
244509.491191hadoop
/**
 * Verifies that snapshot diff reports computed at three nesting levels
 * (the snapshot root, a sub-directory, and a sub-sub-directory) agree with
 * the expected change sets for several snapshot pairs.
 *
 * Each {@code verifyDiffReport} call pins the exact set of expected
 * {@link DiffReportEntry} items (CREATE/DELETE/MODIFY per relative path);
 * an empty array means "no visible changes from that directory's view".
 *
 * @param snapDir       snapshot root directory
 * @param snapSubDir    first-level sub-directory under the snapshot root
 * @param snapSubSubDir second-level sub-directory
 * @throws IOException if fetching a diff report fails
 */
private void verifyDescendantDiffReports(final Path snapDir, final Path snapSubDir, final Path snapSubSubDir) throws IOException {
    // s0 -> s2: changes happened only directly under the snapshot root, so
    // the descendant directories see empty diffs.
    verifyDiffReport(snapDir, "s0", "s2", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")));
    verifyDiffReport(snapSubDir, "s0", "s2", new DiffReportEntry[] {});
    verifyDiffReport(snapSubSubDir, "s0", "s2", new DiffReportEntry[] {});
    // s0 -> s8: the full change history, including files created inside
    // subsub1 and subsub1/subsubsub1; the same changes are reported with
    // paths relative to whichever directory the report is taken from.
    verifyDiffReport(snapDir, "s0", "s8", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file15")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
    verifyDiffReport(snapSubDir, "s0", "s8", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsubsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file15")));
    verifyDiffReport(snapSubSubDir, "s0", "s8", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")));
    // s2 -> s5: only subsub1 gained content; the sub-sub dir diff is empty
    // from its own viewpoint at those snapshots.
    verifyDiffReport(snapDir, "s2", "s5", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file15")));
    verifyDiffReport(snapSubDir, "s2", "s5", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")));
    verifyDiffReport(snapSubSubDir, "s2", "s5", new DiffReportEntry[] {});
    // s3 -> "" (current state): diff against the live tree; note CREATE and
    // DELETE ordering for link13 differs by viewpoint here.
    verifyDiffReport(snapDir, "s3", "", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file15")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1/file12")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/file10")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/link13")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1/link13")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsub1/subsubsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
    verifyDiffReport(snapSubDir, "s3", "", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("subsubsub1")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("subsubsub1/file15")));
    verifyDiffReport(snapSubSubDir, "s3", "", new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file10")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")), new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")));
}
243860.9515169hadoop
/**
 * Verifies enforcement of a cache pool's maximum relative expiry:
 * invalid pool maxima are rejected on add and modify; directive
 * expirations may not exceed the owning pool's max on add, modify, or
 * move to a stricter pool; and raising a pool's max to
 * {@code RELATIVE_EXPIRY_NEVER} re-enables the largest legal settings.
 *
 * @throws Exception if an unexpected DFS operation failure occurs
 */
public void testMaxRelativeExpiry() throws Exception {
    // A negative max expiry must be rejected at pool creation.
    try {
        dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1L));
        fail("Added a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
        assertExceptionContains("negative", e);
    }
    // An over-large max expiry (near Long.MAX_VALUE) must also be rejected.
    try {
        dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
        fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
        assertExceptionContains("too big", e);
    }
    // Create a pool with a valid 10-minute max relative expiry and make
    // sure listing reflects the configured value.
    CachePoolInfo coolPool = new CachePoolInfo("coolPool");
    final long poolExpiration = 1000 * 60 * 10L;
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
    RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
    CachePoolInfo listPool = poolIt.next().getInfo();
    assertFalse("Should only be one pool", poolIt.hasNext());
    assertEquals("Expected max relative expiry to match set value", poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
    // The same invalid maxima must be rejected on an existing pool too.
    try {
        dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1L));
        fail("Added a pool with a negative max expiry.");
    } catch (InvalidRequestException e) {
        assertExceptionContains("negative", e);
    }
    try {
        dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
        fail("Added a pool with too big of a max expiry.");
    } catch (InvalidRequestException e) {
        assertExceptionContains("too big", e);
    }
    // A directive with no explicit expiry defaults to the pool's max.
    CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder().setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build();
    dfs.addCacheDirective(defaultExpiry);
    RemoteIterator<CacheDirectiveEntry> dirIt = dfs.listCacheDirectives(defaultExpiry);
    CacheDirectiveInfo listInfo = dirIt.next().getInfo();
    assertFalse("Should only have one entry in listing", dirIt.hasNext());
    long listExpiration = listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
    assertTrue("Directive expiry should be approximately the pool's max expiry", Math.abs(listExpiration - poolExpiration) < 10 * 1000);
    // Adding or modifying directives that exceed the pool max must fail,
    // whether the expiry is given in relative or absolute form.
    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
        fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
        fail("Added a directive that exceeds pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    // Sanity limits: expirations too far in the future are rejected. The
    // relative overflow case fails fast client-side with
    // IllegalArgumentException; the others surface InvalidRequestException.
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
        fail("Added a directive with a gigantic max value");
    } catch (IllegalArgumentException e) {
        assertExceptionContains("is too far in the future", e);
    }
    try {
        dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
        fail("Added a directive with a gigantic max value");
    } catch (InvalidRequestException e) {
        assertExceptionContains("is too far in the future", e);
    }
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.NEVER).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
        fail("Modified a directive to exceed pool's max relative expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("is too far in the future", e);
    }
    // Moving a directive into a pool with a smaller max must fail unless
    // the directive's expiry is lowered to fit at the same time.
    CachePoolInfo destPool = new CachePoolInfo("destPool");
    dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
    try {
        dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).build());
        fail("Modified a directive to a pool with a lower max expiration");
    } catch (InvalidRequestException e) {
        assertExceptionContains("exceeds the max relative expiration", e);
    }
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).setExpiration(Expiration.newRelative(poolExpiration / 2)).build());
    dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build());
    listInfo = dirIt.next().getInfo();
    listExpiration = listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
    assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately " + poolExpiration / 2, Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
    // Raising the pool max to RELATIVE_EXPIRY_NEVER re-enables the largest
    // legal directive expirations (NEVER itself and NEVER - 1).
    dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
    poolIt = dfs.listCachePools();
    listPool = poolIt.next().getInfo();
    while (!listPool.getPoolName().equals(destPool.getPoolName())) {
        listPool = poolIt.next().getInfo();
    }
    assertEquals("Expected max relative expiry to match set value", CachePoolInfo.RELATIVE_EXPIRY_NEVER, listPool.getMaxRelativeExpiryMs().longValue());
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(CachePoolInfo.RELATIVE_EXPIRY_NEVER)).build());
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(CachePoolInfo.RELATIVE_EXPIRY_NEVER - 1)).build());
}
245667.299161hadoop
/**
 * End-to-end check of the NameNodeInfo MXBean: every JMX attribute read
 * through the platform MBeanServer must agree with the value computed
 * directly from the FSNamesystem, including live/dead node JSON, name-dir
 * status before and after a simulated name-dir failure, cache metrics,
 * and the (absent) rolling-upgrade status.
 *
 * Assertion idioms upgraded from assertTrue(a.equals(b)) /
 * assertTrue(x == n) to assertEquals/assertFalse so failures report the
 * actual and expected values instead of a bare AssertionError.
 *
 * @throws Exception if cluster setup or a JMX attribute read fails
 */
public void testNameNodeMXBeanInfo() throws Exception {
    Configuration conf = new Configuration();
    Long maxLockedMemory = getMemlockLimit(NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, maxLockedMemory);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(4).build();
        cluster.waitActive();
        // Tag DN0 with an upgrade domain and put DN1 in maintenance so the
        // LiveNodes JSON checks below can distinguish them.
        String upgradeDomain = "abcd";
        DatanodeManager dm = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager();
        DatanodeDescriptor dd = dm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
        dd.setUpgradeDomain(upgradeDomain);
        String dnXferAddrWithUpgradeDomainSet = dd.getXferAddr();
        DatanodeDescriptor maintenanceNode = dm.getDatanode(cluster.getDataNodes().get(1).getDatanodeId());
        maintenanceNode.setInMaintenance();
        String dnXferAddrInMaintenance = maintenanceNode.getXferAddr();
        FSNamesystem fsn = cluster.getNameNode().namesystem;
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        // Scalar attributes: each JMX value must match the FSNamesystem.
        String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
        assertEquals(fsn.getClusterId(), clusterId);
        String blockpoolId = (String) mbs.getAttribute(mxbeanName, "BlockPoolId");
        assertEquals(fsn.getBlockPoolId(), blockpoolId);
        String version = (String) mbs.getAttribute(mxbeanName, "Version");
        assertEquals(fsn.getVersion(), version);
        assertEquals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision(), version);
        Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
        assertEquals(fsn.getUsed(), used.longValue());
        Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
        assertEquals(fsn.getTotal(), total.longValue());
        String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
        assertEquals(fsn.getSafemode(), safemode);
        Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
        assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
        Float percentremaining = (Float) (mbs.getAttribute(mxbeanName, "PercentRemaining"));
        assertEquals(fsn.getPercentRemaining(), percentremaining, DELTA);
        Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
        assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
        // LiveNodes JSON: all 4 DNs present with sane per-node fields; only
        // DN0 carries the upgrade domain and only DN1 is IN_MAINTENANCE.
        String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        Map<String, Map<String, Object>> liveNodes = (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
        assertEquals(4, liveNodes.size());
        for (Map<String, Object> liveNode : liveNodes.values()) {
            assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
            assertTrue(((Long) liveNode.get("nonDfsUsedSpace")) >= 0);
            assertTrue(liveNode.containsKey("capacity"));
            assertTrue(((Long) liveNode.get("capacity")) > 0);
            assertTrue(liveNode.containsKey("numBlocks"));
            assertTrue(((Long) liveNode.get("numBlocks")) == 0);
            assertTrue(liveNode.containsKey("lastBlockReport"));
            String xferAddr = (String) liveNode.get("xferaddr");
            if (!xferAddr.equals(dnXferAddrWithUpgradeDomainSet)) {
                assertFalse(liveNode.containsKey("upgradeDomain"));
            } else {
                assertEquals(upgradeDomain, liveNode.get("upgradeDomain"));
            }
            // XNOR: in-maintenance state and the maintenance xfer addr must
            // identify the same node.
            boolean inMaintenance = liveNode.get("adminState").equals(DatanodeInfo.AdminStates.IN_MAINTENANCE.toString());
            assertFalse(xferAddr.equals(dnXferAddrInMaintenance) ^ inMaintenance);
        }
        assertEquals(fsn.getLiveNodes(), alivenodeinfo);
        // Flip DN2/DN3 admin states and re-read; every node entry must still
        // expose location and uuid.
        DatanodeDescriptor decommissioningNode = dm.getDatanode(cluster.getDataNodes().get(2).getDatanodeId());
        decommissioningNode.startDecommission();
        DatanodeDescriptor decommissionedNode = dm.getDatanode(cluster.getDataNodes().get(3).getDatanodeId());
        decommissionedNode.setDecommissioned();
        String alivenodeinfo1 = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        Map<String, Map<String, Object>> liveNodes1 = (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo1);
        for (Map<String, Object> liveNode : liveNodes1.values()) {
            assertTrue(liveNode.containsKey("location"));
            assertTrue(liveNode.containsKey("uuid"));
        }
        String deadNodeInfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes"));
        assertEquals(fsn.getDeadNodes(), deadNodeInfo);
        String nodeUsage = (String) (mbs.getAttribute(mxbeanName, "NodeUsage"));
        assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
        String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName, "NameJournalStatus"));
        assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
        String journalTxnInfo = (String) mbs.getAttribute(mxbeanName, "JournalTransactionInfo");
        assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(), journalTxnInfo);
        String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
        assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(), compileInfo);
        String corruptFiles = (String) (mbs.getAttribute(mxbeanName, "CorruptFiles"));
        assertEquals("Bad value for CorruptFiles", fsn.getCorruptFiles(), corruptFiles);
        int corruptFilesCount = (int) (mbs.getAttribute(mxbeanName, "CorruptFilesCount"));
        assertEquals("Bad value for CorruptFilesCount", fsn.getCorruptFilesCount(), corruptFilesCount);
        // All configured name dirs start out "active" with none "failed".
        String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
        assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
        Map<String, Map<String, String>> statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
        Collection<URI> nameDirUris = cluster.getNameDirs(0);
        for (URI nameDirUri : nameDirUris) {
            File nameDir = new File(nameDirUri);
            System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
            assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
        }
        assertEquals(2, statusMap.get("active").size());
        assertEquals(0, statusMap.get("failed").size());
        // Simulate a name-dir failure by chmod 000 on one dir's "current",
        // then roll the edit log to make the NN notice it.
        File failedNameDir = new File(nameDirUris.iterator().next());
        assertEquals(0, FileUtil.chmod(new File(failedNameDir, "current").getAbsolutePath(), "000"));
        cluster.getNameNodeRpc().rollEditLog();
        nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
        statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
        for (URI nameDirUri : nameDirUris) {
            File nameDir = new File(nameDirUri);
            String expectedStatus = nameDir.equals(failedNameDir) ? "failed" : "active";
            System.out.println("Checking for the presence of " + nameDir + " in " + expectedStatus + " name dirs.");
            assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
        }
        assertEquals(1, statusMap.get("active").size());
        assertEquals(1, statusMap.get("failed").size());
        // Cache metrics and rolling-upgrade status.
        assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
        assertEquals(maxLockedMemory * cluster.getDataNodes().size(), mbs.getAttribute(mxbeanName, "CacheCapacity"));
        assertNull("RollingUpgradeInfo should be null when there is no rolling" + " upgrade", mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"));
    } finally {
        if (cluster != null) {
            // Restore permissions on the chmod-000 dir so cleanup can delete it.
            for (URI dir : cluster.getNameDirs(0)) {
                FileUtil.chmod(new File(new File(dir), "current").getAbsolutePath(), "755");
            }
            cluster.shutdown();
        }
    }
}
244734.218152hadoop
/**
 * Regression test: decommissioning a node holding the only live replica of
 * an under-replicated block must still complete after the nodes holding
 * corrupt (stale) replicas come back online.
 *
 * Scenario (3 DNs, replication factor 2):
 *  1. Write a block with 2 FINALIZED replicas; the third node has none.
 *  2. Stop the first replica-holding node, append (making its replica
 *     stale/corrupt), then stop the second the same way. The append path
 *     with ReplaceDatanodeOnFailure ALWAYS/best-effort recruits the third
 *     node, leaving it with the only live replica.
 *  3. Start decommissioning that third node.
 *  4. Restart the two stopped nodes (whose replicas are now corrupt),
 *     trigger block reports, and verify decommissioning finishes and the
 *     block ends with 3 replicas (2 live re-replicated + 1 decommissioned).
 *
 * @throws Exception on cluster setup failure or wait timeout
 */
public void testDeleteCorruptReplicaForUnderReplicatedBlock() throws Exception {
    final Path file = new Path("/test-file");
    final int numDatanode = 3;
    final short replicationFactor = 2;
    final int numStoppedNodes = 2;
    final int numDecommNodes = 1;
    // The scenario only makes sense if every DN is either stopped or decommissioned.
    assertEquals(numDatanode, numStoppedNodes + numDecommNodes);
    final int datanodeAdminMonitorFixedRateSeconds = 5;
    // Speed up the decommission monitor; keep block-report and redundancy
    // intervals at defaults so re-replication timing is realistic.
    getConf().setInt(MiniDFSCluster.DFS_NAMENODE_DECOMMISSION_INTERVAL_TESTING_KEY, datanodeAdminMonitorFixedRateSeconds);
    getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
    getConf().setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT);
    // ALWAYS + best-effort: appends after a DN failure recruit a replacement
    // datanode when possible instead of failing the pipeline.
    getConf().set(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY, "ALWAYS");
    getConf().setBoolean(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY, true);
    final List<DatanodeDescriptor> allNodes = new ArrayList<>();
    final List<DatanodeDescriptor> stoppedNodes = new ArrayList<>();
    final DatanodeDescriptor decommNode;
    startCluster(1, numDatanode);
    getCluster().waitActive();
    final FSNamesystem namesystem = getCluster().getNamesystem();
    final BlockManager blockManager = namesystem.getBlockManager();
    final DatanodeManager datanodeManager = blockManager.getDatanodeManager();
    final DatanodeAdminManager decomManager = datanodeManager.getDatanodeAdminManager();
    final FileSystem fs = getCluster().getFileSystem();
    for (final DataNode node : getCluster().getDataNodes()) {
        allNodes.add(getDatanodeDesriptor(namesystem, node.getDatanodeUuid()));
    }
    LOG.info("Creating Initial Block with {} FINALIZED replicas", replicationFactor);
    // Write 512 bytes so the single block has content to append to later.
    FSDataOutputStream out = fs.create(file, replicationFactor);
    for (int i = 0; i < 512; i++) {
        out.write(i);
    }
    out.close();
    assertEquals(1, blockManager.getTotalBlocks());
    BlockLocation[] blocksInFile = fs.getFileBlockLocations(file, 0, 0);
    assertEquals(1, blocksInFile.length);
    List<String> replicasInBlock = Arrays.asList(blocksInFile[0].getNames());
    assertEquals(replicationFactor, replicasInBlock.size());
    // Partition the 3 DNs: the 2 holding replicas will be stopped; the one
    // without a replica will be decommissioned.
    DatanodeDescriptor decommNodeTmp = null;
    for (DatanodeDescriptor node : allNodes) {
        if (replicasInBlock.contains(node.getName())) {
            stoppedNodes.add(node);
        } else {
            decommNodeTmp = node;
        }
    }
    assertEquals(numStoppedNodes, stoppedNodes.size());
    assertNotNull(decommNodeTmp);
    decommNode = decommNodeTmp;
    final DatanodeDescriptor firstStoppedNode = stoppedNodes.get(0);
    final DatanodeDescriptor secondStoppedNode = stoppedNodes.get(1);
    LOG.info("Detected 2 nodes with replicas : {} , {}", firstStoppedNode.getXferAddr(), secondStoppedNode.getXferAddr());
    LOG.info("Detected 1 node without replica : {}", decommNode.getXferAddr());
    LOG.info("Stopping first node with replica {}", firstStoppedNode.getXferAddr());
    final List<MiniDFSCluster.DataNodeProperties> stoppedNodeProps = new ArrayList<>();
    MiniDFSCluster.DataNodeProperties stoppedNodeProp = getCluster().stopDataNode(firstStoppedNode.getXferAddr());
    stoppedNodeProps.add(stoppedNodeProp);
    // Backdate the heartbeat so the NN marks the node dead immediately
    // instead of waiting out the stale interval.
    firstStoppedNode.setLastUpdate(213);
    GenericTestUtils.waitFor(() -> 2 == datanodeManager.getNumLiveDataNodes() && 1 == datanodeManager.getNumDeadDataNodes(), 500, 30000);
    // Append advances the block generation stamp, making the stopped node's
    // replica corrupt; replacement-DN policy pulls in the third node.
    appendBlock(fs, file, 2);
    LOG.info("Stopping second node with replica {}", secondStoppedNode.getXferAddr());
    stoppedNodeProp = getCluster().stopDataNode(secondStoppedNode.getXferAddr());
    stoppedNodeProps.add(stoppedNodeProp);
    secondStoppedNode.setLastUpdate(213);
    GenericTestUtils.waitFor(() -> numDecommNodes == datanodeManager.getNumLiveDataNodes() && numStoppedNodes == datanodeManager.getNumDeadDataNodes(), 500, 30000);
    appendBlock(fs, file, 1);
    // Now only the future decommission target holds a live replica.
    blocksInFile = fs.getFileBlockLocations(file, 0, 0);
    assertEquals(1, blocksInFile.length);
    replicasInBlock = Arrays.asList(blocksInFile[0].getNames());
    assertEquals(numDecommNodes, replicasInBlock.size());
    assertTrue(replicasInBlock.contains(decommNode.getName()));
    LOG.info("Block now has 2 corrupt replicas on [{} , {}] and 1 live replica on {}", firstStoppedNode.getXferAddr(), secondStoppedNode.getXferAddr(), decommNode.getXferAddr());
    LOG.info("Decommission node {} with the live replica", decommNode.getXferAddr());
    final ArrayList<DatanodeInfo> decommissionedNodes = new ArrayList<>();
    takeNodeOutofService(0, decommNode.getDatanodeUuid(), 0, decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
    // Decommissioning cannot finish yet: the block would become unreadable.
    // It should sit in the pending queue in DECOMMISSION_INPROGRESS state.
    try {
        GenericTestUtils.waitFor(() -> decomManager.getNumTrackedNodes() == 0 && decomManager.getNumPendingNodes() == numDecommNodes && decommNode.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS), 500, 30000);
    } catch (Exception e) {
        // On timeout, gather block locations for a diagnostic message before failing.
        blocksInFile = fs.getFileBlockLocations(file, 0, 0);
        assertEquals(1, blocksInFile.length);
        replicasInBlock = Arrays.asList(blocksInFile[0].getNames());
        String errMsg = String.format("Node %s failed to start decommissioning." + " numTrackedNodes=%d , numPendingNodes=%d , adminState=%s , nodesWithReplica=[%s]", decommNode.getXferAddr(), decomManager.getNumTrackedNodes(), decomManager.getNumPendingNodes(), decommNode.getAdminState(), String.join(", ", replicasInBlock));
        LOG.error(errMsg);
        fail(errMsg);
    }
    blocksInFile = fs.getFileBlockLocations(file, 0, 0);
    assertEquals(1, blocksInFile.length);
    replicasInBlock = Arrays.asList(blocksInFile[0].getNames());
    assertEquals(numDecommNodes, replicasInBlock.size());
    assertEquals(replicasInBlock.get(0), decommNode.getName());
    LOG.info("Block now has 2 corrupt replicas on [{} , {}] and 1 decommissioning replica on {}", firstStoppedNode.getXferAddr(), secondStoppedNode.getXferAddr(), decommNode.getXferAddr());
    LOG.info("Restarting stopped nodes {} , {}", firstStoppedNode.getXferAddr(), secondStoppedNode.getXferAddr());
    // Bring back the two nodes whose replicas are now corrupt.
    for (final MiniDFSCluster.DataNodeProperties stoppedNode : stoppedNodeProps) {
        assertTrue(getCluster().restartDataNode(stoppedNode));
    }
    for (final MiniDFSCluster.DataNodeProperties stoppedNode : stoppedNodeProps) {
        try {
            getCluster().waitDatanodeFullyStarted(stoppedNode.getDatanode(), 30000);
            LOG.info("Node {} Restarted", stoppedNode.getDatanode().getXferAddress());
        } catch (Exception e) {
            String errMsg = String.format("Node %s Failed to Restart within 30 seconds", stoppedNode.getDatanode().getXferAddress());
            LOG.error(errMsg);
            fail(errMsg);
        }
    }
    // Force block reports so the NN learns about the corrupt replicas and
    // can schedule their invalidation plus re-replication.
    for (MiniDFSCluster.DataNodeProperties dnProps : stoppedNodeProps) {
        DataNodeTestUtils.triggerBlockReport(dnProps.getDatanode());
    }
    // Poll at twice the admin-monitor period; clearQueues each attempt to
    // force the NN to recompute replication work from scratch.
    final int checkEveryMillis = datanodeAdminMonitorFixedRateSeconds * 2 * 1000;
    try {
        GenericTestUtils.waitFor(() -> {
            blockManager.clearQueues();
            return decomManager.getNumTrackedNodes() == 0 && decomManager.getNumPendingNodes() == 0 && decommNode.getAdminState().equals(AdminStates.DECOMMISSIONED);
        }, checkEveryMillis, 40000);
    } catch (Exception e) {
        blocksInFile = fs.getFileBlockLocations(file, 0, 0);
        assertEquals(1, blocksInFile.length);
        replicasInBlock = Arrays.asList(blocksInFile[0].getNames());
        String errMsg = String.format("Node %s failed to complete decommissioning." + " numTrackedNodes=%d , numPendingNodes=%d , adminState=%s , nodesWithReplica=[%s]", decommNode.getXferAddr(), decomManager.getNumTrackedNodes(), decomManager.getNumPendingNodes(), decommNode.getAdminState(), String.join(", ", replicasInBlock));
        LOG.error(errMsg);
        fail(errMsg);
    }
    // Final state: all 3 nodes hold the block — the re-replicated live
    // copies on the restarted nodes plus the decommissioned node's copy.
    blocksInFile = fs.getFileBlockLocations(file, 0, 0);
    assertEquals(1, blocksInFile.length);
    replicasInBlock = Arrays.asList(blocksInFile[0].getNames());
    assertEquals(numDatanode, replicasInBlock.size());
    assertTrue(replicasInBlock.contains(decommNode.getName()));
    for (final DatanodeDescriptor node : stoppedNodes) {
        assertTrue(replicasInBlock.contains(node.getName()));
    }
    LOG.info("Block now has 2 live replicas on [{} , {}] and 1 decommissioned replica on {}", firstStoppedNode.getXferAddr(), secondStoppedNode.getXferAddr(), decommNode.getXferAddr());
}
243829.1231130hadoop
/**
 * Dispatches a single NFSv3 RPC request to the matching procedure handler
 * and writes the serialized reply back through the Netty channel.
 *
 * Flow:
 *  1. Non-NULL calls that are not authenticated with AUTH_SYS or RPCSEC_GSS
 *     are rejected with an AUTH_ERROR denied reply.
 *  2. Non-idempotent procedures are checked against the duplicate-request
 *     cache: a completed retransmission is answered from cache, an
 *     in-progress one is dropped.
 *  3. The procedure handler is invoked, per-procedure latency metrics are
 *     recorded, and the response is serialized, cached (if non-idempotent)
 *     and sent.
 *
 * @param ctx  Netty channel context used to send the response
 * @param info decoded RPC request (header, payload and remote address)
 */
public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
    RpcCall rpcCall = (RpcCall) info.header();
    final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());
    int xid = rpcCall.getXid();
    // Copy the request payload into a private array before XDR decoding.
    byte[] data = new byte[info.data().readableBytes()];
    info.data().readBytes(data);
    XDR xdr = new XDR(data);
    XDR out = new XDR();
    InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();
    Credentials credentials = rpcCall.getCredential();
    // NULL probes need no credentials; every other procedure must carry
    // AUTH_SYS or RPCSEC_GSS, otherwise reject with AUTH_ERROR.
    if (nfsproc3 != NFSPROC3.NULL) {
        if (credentials.getFlavor() != AuthFlavor.AUTH_SYS && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) {
            LOG.info("Wrong RPC AUTH flavor, {} is not AUTH_SYS or RPCSEC_GSS.", credentials.getFlavor());
            XDR reply = new XDR();
            RpcDeniedReply rdr = new RpcDeniedReply(xid, RpcReply.ReplyState.MSG_ACCEPTED, RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
            rdr.write(reply);
            ByteBuf buf = Unpooled.wrappedBuffer(reply.asReadOnlyWrap().buffer());
            RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
            RpcUtil.sendRpcResponse(ctx, rsp);
            return;
        }
    }
    // Duplicate-request cache: only non-idempotent calls are tracked.
    if (!isIdempotent(rpcCall)) {
        RpcCallCache.CacheEntry entry = rpcCallCache.checkOrAddToCache(client, xid);
        if (entry != null) {
            // This XID was seen before from the same client: a retransmission.
            if (entry.isCompleted()) {
                LOG.info("Sending the cached reply to retransmitted request {}", xid);
                RpcUtil.sendRpcResponse(ctx, entry.getResponse());
                return;
            } else {
                // Original call still in flight; drop the retransmission.
                LOG.info("Retransmitted request, transaction still in progress {}", xid);
                return;
            }
        }
    }
    final long startTime = System.nanoTime();
    NFS3Response response = null;
    if (nfsproc3 == NFSPROC3.NULL) {
        response = nullProcedure();
    } else if (nfsproc3 == NFSPROC3.GETATTR) {
        response = getattr(xdr, info);
        metrics.addGetattr(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.SETATTR) {
        response = setattr(xdr, info);
        metrics.addSetattr(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.LOOKUP) {
        response = lookup(xdr, info);
        metrics.addLookup(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.ACCESS) {
        response = access(xdr, info);
        metrics.addAccess(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READLINK) {
        response = readlink(xdr, info);
        metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READ) {
        LOG.debug("{}{}", Nfs3Utils.READ_RPC_START, xid);
        response = read(xdr, info);
        LOG.debug("{}{}", Nfs3Utils.READ_RPC_END, xid);
        metrics.addRead(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.WRITE) {
        // WRITE may complete asynchronously (null response below); no sync
        // metrics are recorded here — presumably recorded on completion.
        // TODO(review): confirm write latency is tracked elsewhere.
        LOG.debug("{}{}", Nfs3Utils.WRITE_RPC_START, xid);
        response = write(xdr, info);
    } else if (nfsproc3 == NFSPROC3.CREATE) {
        response = create(xdr, info);
        metrics.addCreate(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.MKDIR) {
        response = mkdir(xdr, info);
        metrics.addMkdir(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.SYMLINK) {
        response = symlink(xdr, info);
        metrics.addSymlink(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.MKNOD) {
        response = mknod(xdr, info);
        metrics.addMknod(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.REMOVE) {
        response = remove(xdr, info);
        metrics.addRemove(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.RMDIR) {
        response = rmdir(xdr, info);
        metrics.addRmdir(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.RENAME) {
        response = rename(xdr, info);
        metrics.addRename(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.LINK) {
        response = link(xdr, info);
        metrics.addLink(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READDIR) {
        response = readdir(xdr, info);
        metrics.addReaddir(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.READDIRPLUS) {
        response = readdirplus(xdr, info);
        metrics.addReaddirplus(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.FSSTAT) {
        response = fsstat(xdr, info);
        metrics.addFsstat(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.FSINFO) {
        response = fsinfo(xdr, info);
        metrics.addFsinfo(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.PATHCONF) {
        response = pathconf(xdr, info);
        metrics.addPathconf(Nfs3Utils.getElapsedTime(startTime));
    } else if (nfsproc3 == NFSPROC3.COMMIT) {
        // Like WRITE, COMMIT may reply asynchronously; no sync metrics here.
        response = commit(xdr, info);
    } else {
        // Unknown procedure number: reply PROC_UNAVAIL immediately.
        // BUGFIX: previously this reply was written into 'out' but 'response'
        // stayed null, so the early return below silently dropped it and the
        // client never learned the procedure was unavailable.
        RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(out);
        ByteBuf unavailBuf = Unpooled.wrappedBuffer(out.asReadOnlyWrap().buffer());
        RpcUtil.sendRpcResponse(ctx, new RpcResponse(unavailBuf, info.remoteAddress()));
        return;
    }
    // A null response means the handler will answer asynchronously later.
    if (response == null) {
        LOG.debug("No sync response, expect an async response for request XID={}", rpcCall.getXid());
        return;
    }
    out = response.serialize(out, xid, new VerifierNone());
    ByteBuf buf = Unpooled.wrappedBuffer(out.asReadOnlyWrap().buffer());
    RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
    // Record the completed reply so retransmissions can be served from cache.
    if (!isIdempotent(rpcCall)) {
        rpcCallCache.callCompleted(client, xid, rsp);
    }
    RpcUtil.sendRpcResponse(ctx, rsp);
}
242976.432137hadoop
/**
 * Invokes {@code method} on the first namenode in {@code namenodes} that
 * answers successfully, applying the router's failover policy.
 *
 * Per-namenode failure handling:
 * <ul>
 *   <li>{@code ObserverRetryOnActiveException}: stop trying observers and
 *       fall through to the active namenode.</li>
 *   <li>{@code StandbyException}: mark failover and try the next one.</li>
 *   <li>Unavailable (see {@code isUnavailableException}): observers are
 *       marked unavailable in the resolver; others trigger failover.</li>
 *   <li>{@code RemoteException}: unwrapped, cleaned and rethrown — the
 *       remote server did respond, so this is not a failover case.</li>
 *   <li>{@code ConnectionNullException}: rethrown as {@code StandbyException}
 *       so the client retries.</li>
 *   <li>{@code NoNamenodesAvailableException}: optionally rotates the
 *       resolver cache and rethrows as {@code RetriableException}.</li>
 *   <li>Anything else: rethrown as-is.</li>
 * </ul>
 * If every namenode was tried without success, throws
 * {@code ConnectException} when all failures were connectivity related,
 * otherwise {@code StandbyException}.
 *
 * @param ugi         user to proxy the call as
 * @param namenodes   ordered candidates (most preferred first)
 * @param useObserver whether observer namenodes may serve this call
 * @param protocol    RPC protocol class of {@code method}
 * @param method      remote method to call
 * @param params      arguments for {@code method}
 * @return the remote invocation result
 * @throws ConnectException if no namenode could be reached
 * @throws StandbyException if no active namenode was found
 * @throws IOException      for unwrapped remote or local failures
 */
public Object invokeMethod(final UserGroupInformation ugi, final List<? extends FederationNamenodeContext> namenodes, boolean useObserver, final Class<?> protocol, final Method method, final Object... params) throws ConnectException, StandbyException, IOException {
    if (namenodes == null || namenodes.isEmpty()) {
        throw new IOException("No namenodes to invoke " + method.getName() + " with params " + Arrays.deepToString(params) + " from " + router.getRouterId());
    }
    addClientInfoToCallerContext(ugi);
    Object ret = null;
    if (rpcMonitor != null) {
        rpcMonitor.proxyOp();
    }
    boolean failover = false;
    // May be cleared mid-loop when an observer asks us to go to the active.
    boolean shouldUseObserver = useObserver;
    // Preserves insertion order so the final error report matches try order.
    Map<FederationNamenodeContext, IOException> ioes = new LinkedHashMap<>();
    for (FederationNamenodeContext namenode : namenodes) {
        // Skip observers entirely when they are not (or no longer) allowed.
        if (!shouldUseObserver && (namenode.getState() == FederationNamenodeServiceState.OBSERVER)) {
            continue;
        }
        ConnectionContext connection = null;
        String nsId = namenode.getNameserviceId();
        String rpcAddress = namenode.getRpcAddress();
        try {
            connection = this.getConnection(ugi, nsId, rpcAddress, protocol);
            ProxyAndInfo<?> client = connection.getClient();
            final Object proxy = client.getProxy();
            // NOTE(review): the original useObserver flag is passed here, not
            // shouldUseObserver — confirm this is intentional.
            ret = invoke(nsId, namenode, useObserver, 0, method, proxy, params);
            // A success after a failover means this (non-observer) namenode is
            // the new active; update the resolver so later calls go here first.
            if (failover && FederationNamenodeServiceState.OBSERVER != namenode.getState()) {
                InetSocketAddress address = client.getAddress();
                namenodeResolver.updateActiveNamenode(nsId, address);
            }
            if (this.rpcMonitor != null) {
                this.rpcMonitor.proxyOpComplete(true, nsId, namenode.getState());
            }
            if (this.router.getRouterClientMetrics() != null) {
                this.router.getRouterClientMetrics().incInvokedMethod(method);
            }
            return ret;
        } catch (IOException ioe) {
            ioes.put(namenode, ioe);
            if (ioe instanceof ObserverRetryOnActiveException) {
                LOG.info("Encountered ObserverRetryOnActiveException from {}." + " Retry active namenode directly.", namenode);
                shouldUseObserver = false;
            } else if (ioe instanceof StandbyException) {
                if (this.rpcMonitor != null) {
                    this.rpcMonitor.proxyOpFailureStandby(nsId);
                }
                failover = true;
            } else if (isUnavailableException(ioe)) {
                if (this.rpcMonitor != null) {
                    this.rpcMonitor.proxyOpFailureCommunicate(nsId);
                }
                if (FederationNamenodeServiceState.OBSERVER == namenode.getState()) {
                    namenodeResolver.updateUnavailableNamenode(nsId, NetUtils.createSocketAddr(namenode.getRpcAddress()));
                } else {
                    failover = true;
                }
            } else if (ioe instanceof RemoteException) {
                // The server answered (with an error), so the proxy op itself
                // is counted as complete; the unwrapped cause goes to the caller.
                if (this.rpcMonitor != null) {
                    this.rpcMonitor.proxyOpComplete(true, nsId, namenode.getState());
                }
                RemoteException re = (RemoteException) ioe;
                ioe = re.unwrapRemoteException();
                ioe = getCleanException(ioe);
                throw ioe;
            } else if (ioe instanceof ConnectionNullException) {
                if (this.rpcMonitor != null) {
                    this.rpcMonitor.proxyOpFailureCommunicate(nsId);
                }
                LOG.error("Get connection for {} {} error: {}", nsId, rpcAddress, ioe.getMessage());
                // Surface as StandbyException so the client retries elsewhere.
                StandbyException se = new StandbyException(ioe.getMessage());
                se.initCause(ioe);
                throw se;
            } else if (ioe instanceof NoNamenodesAvailableException) {
                IOException cause = (IOException) ioe.getCause();
                if (this.rpcMonitor != null) {
                    this.rpcMonitor.proxyOpNoNamenodes(nsId);
                }
                LOG.error("Cannot get available namenode for {} {} error: {}", nsId, rpcAddress, ioe.getMessage());
                if (shouldRotateCache(cause)) {
                    this.namenodeResolver.rotateCache(nsId, namenode, useObserver);
                }
                throw new RetriableException(ioe);
            } else {
                // Unexpected failure type: count it and propagate unchanged.
                if (this.rpcMonitor != null) {
                    this.rpcMonitor.proxyOpFailureCommunicate(nsId);
                    this.rpcMonitor.proxyOpComplete(false, nsId, namenode.getState());
                }
                throw ioe;
            }
        } finally {
            // Always return the connection to the pool, success or failure.
            if (connection != null) {
                connection.release();
            }
        }
    }
    // Every candidate failed with a retriable error; report them all.
    if (this.rpcMonitor != null) {
        this.rpcMonitor.proxyOpComplete(false, null, null);
    }
    String msg = "No namenode available to invoke " + method.getName() + " " + Arrays.deepToString(params) + " in " + namenodes + " from " + router.getRouterId();
    LOG.error(msg);
    int exConnect = 0;
    for (Entry<FederationNamenodeContext, IOException> entry : ioes.entrySet()) {
        FederationNamenodeContext namenode = entry.getKey();
        String nnKey = namenode.getNamenodeKey();
        String addr = namenode.getRpcAddress();
        IOException ioe = entry.getValue();
        if (ioe instanceof StandbyException) {
            LOG.error("{} at {} is in Standby: {}", nnKey, addr, ioe.getMessage());
        } else if (isUnavailableException(ioe)) {
            exConnect++;
            LOG.error("{} at {} cannot be reached: {}", nnKey, addr, ioe.getMessage());
        } else {
            LOG.error("{} at {} error: \"{}\"", nnKey, addr, ioe.getMessage());
        }
    }
    // All connectivity failures -> ConnectException; otherwise at least one
    // namenode was up but standby -> StandbyException.
    if (exConnect == ioes.size()) {
        throw new ConnectException(msg);
    } else {
        throw new StandbyException(msg);
    }
}
246937.391163hadoop
/**
 * Verifies that JobHistoryEventHandler publishes MR job and task events to
 * the timeline service and that the store returns them newest-first
 * (ordered by descending timestamp, as the index assertions below encode).
 *
 * Each phase sends one event, waits for the dispatcher, then re-reads the
 * entity and checks the cumulative event list.
 */
public void testTimelineEventHandling() throws Exception {
    TestParams t = new TestParams(RunningAppContext.class, false);
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    long currentTime = System.currentTimeMillis();
    try (MiniYARNCluster yarnCluster = new MiniYARNCluster(TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1)) {
        yarnCluster.init(conf);
        yarnCluster.start();
        // Point the handler at the mini cluster's timeline web app.
        Configuration confJHEH = new YarnConfiguration(conf);
        confJHEH.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
        confJHEH.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS, MiniYARNCluster.getHostname() + ":" + yarnCluster.getApplicationHistoryServer().getPort());
        JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
        jheh.init(confJHEH);
        jheh.start();
        TimelineStore ts = yarnCluster.getApplicationHistoryServer().getTimelineStore();
        // Phase 1: AM_STARTED at currentTime-10 creates the job entity.
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1), currentTime - 10));
        jheh.getDispatcher().await();
        TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        TimelineEntity tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(1, tEntity.getEvents().size());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(0).getTimestamp());
        // Phase 2: JOB_SUBMITTED at currentTime+10 — newer, so it sorts first.
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobSubmittedEvent(TypeConverter.fromYarn(t.jobId), "name", "user", 200, "/foo/job.xml", new HashMap<JobACL, AccessControlList>(), "default"), currentTime + 10));
        jheh.getDispatcher().await();
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(2, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());
        // Phase 3: JOB_QUEUE_CHANGED at currentTime-20 — oldest, sorts last.
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"), currentTime - 20));
        jheh.getDispatcher().await();
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(3, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(2).getTimestamp());
        // Phase 4: JOB_FINISHED at currentTime slots between +10 and -10.
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(), new Counters(), new Counters()), currentTime));
        jheh.getDispatcher().await();
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(4, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(3).getEventType());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(2).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(3).getTimestamp());
        // Phase 5: JOB_KILLED at currentTime+20 becomes the newest event.
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
        jheh.getDispatcher().await();
        entities = ts.getEntities("MAPREDUCE_JOB", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
        Assert.assertEquals(5, tEntity.getEvents().size());
        Assert.assertEquals(EventType.JOB_KILLED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(EventType.JOB_SUBMITTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(EventType.JOB_FINISHED.toString(), tEntity.getEvents().get(2).getEventType());
        Assert.assertEquals(EventType.AM_STARTED.toString(), tEntity.getEvents().get(3).getEventType());
        Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(), tEntity.getEvents().get(4).getEventType());
        Assert.assertEquals(currentTime + 20, tEntity.getEvents().get(0).getTimestamp());
        Assert.assertEquals(currentTime + 10, tEntity.getEvents().get(1).getTimestamp());
        Assert.assertEquals(currentTime, tEntity.getEvents().get(2).getTimestamp());
        Assert.assertEquals(currentTime - 10, tEntity.getEvents().get(3).getTimestamp());
        Assert.assertEquals(currentTime - 20, tEntity.getEvents().get(4).getTimestamp());
        // Phase 6: a MAP TaskStartedEvent creates the task entity with the
        // task type carried in the event info.
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(t.taskID, 0, TaskType.MAP, "")));
        jheh.getDispatcher().await();
        entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
        Assert.assertEquals(1, tEntity.getEvents().size());
        Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(0).getEventType());
        Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
        // Phase 7: a second TaskStartedEvent (REDUCE) for the same task id is
        // appended; the newer REDUCE event appears at index 0.
        handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(t.taskID, 0, TaskType.REDUCE, "")));
        jheh.getDispatcher().await();
        entities = ts.getEntities("MAPREDUCE_TASK", null, null, null, null, null, null, null, null, null);
        Assert.assertEquals(1, entities.getEntities().size());
        tEntity = entities.getEntities().get(0);
        Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
        Assert.assertEquals(2, tEntity.getEvents().size());
        Assert.assertEquals(EventType.TASK_STARTED.toString(), tEntity.getEvents().get(1).getEventType());
        Assert.assertEquals(TaskType.REDUCE.toString(), tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
        Assert.assertEquals(TaskType.MAP.toString(), tEntity.getEvents().get(1).getEventInfo().get("TASK_TYPE"));
    }
}
244266.5329127hadoop
/**
 * Renders the applications table: emits the table skeleton (header plus
 * empty tbody) and a JavaScript array literal ({@code appsTableData}) with
 * one row per application report, which the client-side DataTables code
 * consumes. Reports whose state is not in {@code reqAppStates} (when that
 * filter is non-empty) are skipped.
 *
 * @param html the Hamlet block to render into
 */
protected void renderData(Block html) {
    TR<THEAD<TABLE<Hamlet>>> tr = html.table("#apps").thead().tr();
    for (ColumnHeader col : COLUMNS) {
        tr = tr.th(col.getSelector(), col.getCData());
    }
    TBODY<TABLE<Hamlet>> tbody = tr.__().__().tbody();
    StringBuilder appsTableData = new StringBuilder("[\n");
    for (ApplicationReport appReport : appReports) {
        // Apply the requested-state filter, if any.
        if (!reqAppStates.isEmpty() && !reqAppStates.contains(appReport.getYarnApplicationState())) {
            continue;
        }
        AppInfo app = new AppInfo(appReport);
        // NOTE(review): fromString will fail if getCurrentAppAttemptId() is
        // null or malformed — confirm AppInfo always provides a valid id.
        ApplicationAttemptId appAttemptId = ApplicationAttemptId.fromString(app.getCurrentAppAttemptId());
        String queuePercent = "N/A";
        String clusterPercent = "N/A";
        if (appReport.getApplicationResourceUsageReport() != null) {
            queuePercent = String.format("%.1f", appReport.getApplicationResourceUsageReport().getQueueUsagePercentage());
            clusterPercent = String.format("%.1f", appReport.getApplicationResourceUsageReport().getClusterUsagePercentage());
        }
        // Blacklisted-node count and completed-state flag come from the RM's
        // in-memory app record; both stay at their defaults if it is gone.
        String blacklistedNodesCount = "N/A";
        RMApp rmApp = rm.getRMContext().getRMApps().get(appAttemptId.getApplicationId());
        boolean isAppInCompletedState = false;
        if (rmApp != null) {
            RMAppAttempt appAttempt = rmApp.getRMAppAttempt(appAttemptId);
            Set<String> nodes = null == appAttempt ? null : appAttempt.getBlacklistedNodes();
            if (nodes != null) {
                blacklistedNodesCount = String.valueOf(nodes.size());
            }
            isAppInCompletedState = rmApp.isAppInCompletedStates();
        }
        String percent = StringUtils.format("%.1f", app.getProgress());
        // Build one JS array row. User-provided strings are HTML-escaped and
        // then ECMAScript-escaped since they end up inside a JS string literal.
        appsTableData.append("[\"<a href='").append(url("app", app.getAppId())).append("'>").append(app.getAppId()).append("</a>\",\"").append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app.getUser()))).append("\",\"").append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app.getName()))).append("\",\"").append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app.getType()))).append("\",\"").append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app.getApplicationTags() == null ? "" : app.getApplicationTags()))).append("\",\"").append(StringEscapeUtils.escapeEcmaScript(StringEscapeUtils.escapeHtml4(app.getQueue()))).append("\",\"").append(String.valueOf(app.getPriority())).append("\",\"").append(app.getStartedTime()).append("\",\"").append(app.getLaunchTime()).append("\",\"").append(app.getFinishedTime()).append("\",\"").append(app.getAppState() == null ? UNAVAILABLE : app.getAppState()).append("\",\"").append(app.getFinalAppStatus()).append("\",\"").append(app.getRunningContainers() == -1 ? "N/A" : String.valueOf(app.getRunningContainers())).append("\",\"").append(app.getAllocatedCpuVcores() == -1 ? "N/A" : String.valueOf(app.getAllocatedCpuVcores())).append("\",\"").append(app.getAllocatedMemoryMB() == -1 ? "N/A" : String.valueOf(app.getAllocatedMemoryMB())).append("\",\"").append((isAppInCompletedState && app.getAllocatedGpus() <= 0) ? UNAVAILABLE : String.valueOf(app.getAllocatedGpus())).append("\",\"").append(app.getReservedCpuVcores() == -1 ? "N/A" : String.valueOf(app.getReservedCpuVcores())).append("\",\"").append(app.getReservedMemoryMB() == -1 ? "N/A" : String.valueOf(app.getReservedMemoryMB())).append("\",\"").append((isAppInCompletedState && app.getReservedGpus() <= 0) ? 
UNAVAILABLE : String.valueOf(app.getReservedGpus())).append("\",\"").append(queuePercent).append("\",\"").append(clusterPercent).append("\",\"").append("<br title='").append(percent).append("'> <div class='").append(C_PROGRESSBAR).append("' title='").append(join(percent, '%')).append("'> ").append("<div class='").append(C_PROGRESSBAR_VALUE).append("' style='").append(join("width:", percent, '%')).append("'> </div> </div>").append("\",\"<a ");
        // Tracking link: no link for NEW/unset URLs; "History" once the app
        // reached a final state, otherwise "ApplicationMaster".
        String trackingURL = app.getTrackingUrl() == null || app.getTrackingUrl().equals(UNAVAILABLE) || app.getAppState() == YarnApplicationState.NEW ? null : app.getTrackingUrl();
        String trackingUI = app.getTrackingUrl() == null || app.getTrackingUrl().equals(UNAVAILABLE) || app.getAppState() == YarnApplicationState.NEW ? "Unassigned" : Apps.isApplicationFinalState(app.getAppState()) ? "History" : "ApplicationMaster";
        appsTableData.append(trackingURL == null ? "#" : "href='" + trackingURL).append("'>").append(trackingUI).append("</a>\",").append("\"").append(blacklistedNodesCount).append("\"],\n");
    }
    // Drop the trailing "," before the final "\n" left by the last row.
    if (appsTableData.charAt(appsTableData.length() - 2) == ',') {
        appsTableData.delete(appsTableData.length() - 2, appsTableData.length() - 1);
    }
    appsTableData.append("]");
    html.script().$type("text/javascript").__("var appsTableData=" + appsTableData).__();
    tbody.__().__();
}
245327.491173hadoop
/**
 * Verifies that CSQueueMetrics track guaranteed and maximum capacities for
 * memory and a custom resource type as nodes are added and removed and as
 * the queue hierarchy under "a" is reconfigured.
 */
public void testCSQueueMetrics() throws Exception {
    final String customRes = TestQueueMetricsForCustomResources.CUSTOM_RES_1;
    // Register memory, vcores and one custom resource type with ResourceUtils.
    Map<String, ResourceInformation> riMap = new HashMap<>();
    riMap.put(ResourceInformation.MEMORY_URI, ResourceInformation.newInstance(ResourceInformation.MEMORY_MB.getName(), ResourceInformation.MEMORY_MB.getUnits(), YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB));
    riMap.put(ResourceInformation.VCORES_URI, ResourceInformation.newInstance(ResourceInformation.VCORES.getName(), ResourceInformation.VCORES.getUnits(), YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES));
    riMap.put(customRes, ResourceInformation.newInstance(customRes, "", 1, 10));
    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
    // Capacity scheduler with DRF and the custom resource enabled.
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setResourceComparator(DominantResourceCalculator.class);
    csConf.set(YarnConfiguration.RESOURCE_TYPES, customRes);
    setupQueueConfiguration(csConf);
    YarnConfiguration conf = new YarnConfiguration(csConf);
    conf.setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES, false);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MockRM rm = new MockRM(conf);
    rm.start();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    // Two nodes of 50GB/50 vcores, carrying 1000 and 2000 custom units.
    RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(50 * GB, 50, ImmutableMap.<String, String>builder().put(customRes, String.valueOf(1000)).build()), 1, "n1");
    RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(50 * GB, 50, ImmutableMap.<String, String>builder().put(customRes, String.valueOf(2000)).build()), 2, "n2");
    cs.handle(new NodeAddedSchedulerEvent(n1));
    cs.handle(new NodeAddedSchedulerEvent(n2));
    // Both nodes present: 100GB memory, 3000 custom units in the cluster.
    assertEquals(94, customMetricsOf(cs, "a1").getGuaranteedCapacity().get(customRes).longValue());
    assertEquals(3000, customMetricsOf(cs, "a1").getMaxCapacity().get(customRes).longValue());
    assertEquals(10240, csMetricsOf(cs, "a").getGuaranteedMB());
    assertEquals(71680, csMetricsOf(cs, "b1").getGuaranteedMB());
    assertEquals(102400, csMetricsOf(cs, "a").getMaxCapacityMB());
    assertEquals(102400, csMetricsOf(cs, "b1").getMaxCapacityMB());
    assertEquals(314, customMetricsOf(cs, "a").getGuaranteedCapacity().get(customRes).longValue());
    assertEquals(3000, customMetricsOf(cs, "a").getMaxCapacity().get(customRes).longValue());
    assertEquals(2126, customMetricsOf(cs, "b1").getGuaranteedCapacity().get(customRes).longValue());
    assertEquals(3000, customMetricsOf(cs, "b1").getMaxCapacity().get(customRes).longValue());
    // Remove n2: absolute capacities halve (memory) / drop to 1000 (custom).
    cs.handle(new NodeRemovedSchedulerEvent(n2));
    assertEquals(5120, csMetricsOf(cs, "a").getGuaranteedMB());
    assertEquals(35840, csMetricsOf(cs, "b1").getGuaranteedMB());
    assertEquals(51200, csMetricsOf(cs, "a").getMaxCapacityMB());
    assertEquals(51200, csMetricsOf(cs, "b1").getMaxCapacityMB());
    assertEquals(104, customMetricsOf(cs, "a").getGuaranteedCapacity().get(customRes).longValue());
    assertEquals(1000, customMetricsOf(cs, "a").getMaxCapacity().get(customRes).longValue());
    assertEquals(708, customMetricsOf(cs, "b1").getGuaranteedCapacity().get(customRes).longValue());
    assertEquals(1000, customMetricsOf(cs, "b1").getMaxCapacity().get(customRes).longValue());
    // Relative (fractional) capacities do not depend on cluster size.
    assertEquals(A_CAPACITY / 100, csMetricsOf(cs, "a").getGuaranteedCapacity(), DELTA);
    assertEquals(A_CAPACITY / 100, csMetricsOf(cs, "a").getGuaranteedAbsoluteCapacity(), DELTA);
    assertEquals(B1_CAPACITY / 100, csMetricsOf(cs, "b1").getGuaranteedCapacity(), DELTA);
    assertEquals((B_CAPACITY / 100) * (B1_CAPACITY / 100), csMetricsOf(cs, "b1").getGuaranteedAbsoluteCapacity(), DELTA);
    assertEquals(1, csMetricsOf(cs, "a").getMaxCapacity(), DELTA);
    assertEquals(1, csMetricsOf(cs, "a").getMaxAbsoluteCapacity(), DELTA);
    assertEquals(1, csMetricsOf(cs, "b1").getMaxCapacity(), DELTA);
    assertEquals(1, csMetricsOf(cs, "b1").getMaxAbsoluteCapacity(), DELTA);
    // Reconfigure "a" with children a1/a2/a3 and re-check the new leaves.
    csConf.setQueues(A, new String[] { "a1", "a2", "a3" });
    csConf.setCapacity(A2, 29.5f);
    csConf.setCapacity(A3, 40.5f);
    csConf.setMaximumCapacity(A3, 50.0f);
    cs.reinitialize(csConf, new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), new ClientToAMTokenSecretManagerInRM(), null));
    assertEquals(1024, csMetricsOf(cs, "a2").getGuaranteedMB());
    assertEquals(2048, csMetricsOf(cs, "a3").getGuaranteedMB());
    assertEquals(51200, csMetricsOf(cs, "a2").getMaxCapacityMB());
    assertEquals(25600, csMetricsOf(cs, "a3").getMaxCapacityMB());
    assertEquals(30, customMetricsOf(cs, "a2").getGuaranteedCapacity().get(customRes).longValue());
    assertEquals(1000, customMetricsOf(cs, "a2").getMaxCapacity().get(customRes).longValue());
    assertEquals(42, customMetricsOf(cs, "a3").getGuaranteedCapacity().get(customRes).longValue());
    assertEquals(500, customMetricsOf(cs, "a3").getMaxCapacity().get(customRes).longValue());
    rm.stop();
}

/** Metrics of {@code queue}, cast to the capacity-scheduler-specific type. */
private static CSQueueMetrics csMetricsOf(CapacityScheduler cs, String queue) {
    return (CSQueueMetrics) cs.getQueue(queue).getMetrics();
}

/** Custom-resource view of the metrics of {@code queue}. */
private static CSQueueMetricsForCustomResources customMetricsOf(CapacityScheduler cs, String queue) {
    return (CSQueueMetricsForCustomResources) csMetricsOf(cs, queue).getQueueMetricsForCustomResources();
}
246907.11164hadoop
/**
 * Verifies CapacityScheduler reservation behavior when no resources are
 * available: a container is reserved on a node only when the cluster-level
 * resource limit leaves enough headroom, and no reservation is made when the
 * limit leaves none ("none available").
 *
 * NOTE(review): this test is strictly order-sensitive -- every
 * assignContainers/applyResourceCommitRequest call mutates queue, app and
 * node state that the subsequent asserts depend on. Do not reorder.
 */
public void testReservationsNoneAvailable() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf);
    // Stub leaf queue A so container-token plumbing is bypassed.
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    final String user_0 = "user_0";
    // Two app attempts from the same user in the same queue; AM diagnostics
    // updates are stubbed out because they are irrelevant here.
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    a.submitApplicationAttempt(app_1, user_0);
    // Three 8GB nodes => 24GB cluster capacity.
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    String host_2 = "host_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getNode(node_2.getNodeID())).thenReturn(node_2);
    final int numNodes = 3;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    // Requests at ascending priorities: AM 2GB x1, map 3GB x2, reduce 5GB x1,
    // "last" 8GB x2 (the 8GB asks are what later force a reservation).
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    Priority priorityLast = TestUtils.createMockPriority(12);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 5 * GB, 1, true, priorityReduce, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 8 * GB, 2, true, priorityLast, recordFactory)));
    // Phase 1: AM container (2GB) is allocated on node_0.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(22 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Phase 2: first map task (3GB) lands on node_0 -> node_0 at 5GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(19 * GB, a.getMetrics().getAvailableMB());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Phase 3: second map task (3GB) lands on node_1 -> queue at 8GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(16 * GB, a.getMetrics().getAvailableMB());
    assertEquals(16 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Phase 4: reduce (5GB) offered node_0 under a 10GB limit. With 8GB used,
    // only 2GB headroom remains -- nothing is allocated AND nothing reserved.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(16 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Phase 5: same 10GB limit on the empty node_2 -- still no allocation or
    // reservation, proving the limit (not node fullness) is what blocks it.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_2, new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(16 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Phase 6: full cluster limit restored -- reduce (5GB) allocates on node_2.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(11 * GB, a.getMetrics().getAvailableMB());
    assertEquals(11 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
    // Phase 7: the 8GB "last" request does not fit on node_0 (only 3GB free),
    // so it is RESERVED there: used jumps to 21GB while allocation stays 13GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(21 * GB, a.getUsedResources().getMemorySize());
    assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(8 * GB, a.getMetrics().getReservedMB());
    assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, a.getMetrics().getAvailableMB());
    assertEquals(3 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
    // Phase 8: offering node_2 changes nothing -- the existing reservation and
    // remaining headroom prevent any further allocation or reservation.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(21 * GB, a.getUsedResources().getMemorySize());
    assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(8 * GB, a.getMetrics().getReservedMB());
    assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, a.getMetrics().getAvailableMB());
    assertEquals(3 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
}
245125.251189hadoop
/**
 * Verifies FairScheduler's per-queue {@code maxAMShare} enforcement: with a
 * 20GB node and maxAMShare=0.2, queue1 may devote at most 4GB to AM
 * containers. AMs beyond that are queued and started only as earlier AMs
 * finish, and the queue's AM resource usage is released when apps complete,
 * are killed before launching, or their AM container expires and restarts.
 *
 * NOTE(review): the handle/update sequence is order-sensitive -- each
 * scheduler event advances state that the following asserts depend on.
 */
public void testQueueMaxAMShare() throws Exception {
    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
    // queue1 may use at most 20% of cluster resources for AM containers.
    AllocationFileWriter.create().addQueue(new AllocationFileQueue.Builder("queue1").maxAMShare(0.2).build()).writeToFile(ALLOC_FILE);
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    // Single 20GB / 20-vcore node => AM share cap of 4GB for queue1.
    RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(20480, 20), 0, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
    scheduler.handle(nodeEvent);
    scheduler.update();
    FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
    assertEquals("Queue queue1's fair share should be 0", 0, queue1.getFairShare().getMemorySize());
    // Seed a request in "default" so fair shares get computed.
    createSchedulingRequest(1 * 1024, "default", "user1");
    scheduler.update();
    scheduler.handle(updateEvent);
    Resource amResource1 = Resource.newInstance(1024, 1);
    Resource amResource2 = Resource.newInstance(2048, 2);
    Resource amResource3 = Resource.newInstance(1860, 2);
    int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
    // App 1: 1GB AM fits under the 4GB cap -> runs.
    ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
    createApplicationWithAMResource(attId1, "queue1", "user1", amResource1);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId1);
    FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application1's AM requests 1024 MB memory", 1024, app1.getAMResource().getMemorySize());
    assertEquals("Application1's AM should be running", 1, app1.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 1024 MB memory", 1024, queue1.getAmResourceUsage().getMemorySize());
    // App 2: second 1GB AM still fits (2GB total) -> runs.
    ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
    createApplicationWithAMResource(attId2, "queue1", "user1", amResource1);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId2);
    FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application2's AM requests 1024 MB memory", 1024, app2.getAMResource().getMemorySize());
    assertEquals("Application2's AM should be running", 1, app2.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // App 3: a third 1GB AM would exceed the cap per fair-share rules -> held.
    ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
    createApplicationWithAMResource(attId3, "queue1", "user1", amResource1);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId3);
    FSAppAttempt app3 = scheduler.getSchedulerApp(attId3);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application3's AM resource shouldn't be updated", 0, app3.getAMResource().getMemorySize());
    assertEquals("Application3's AM should not be running", 0, app3.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // Non-AM containers are NOT limited by maxAMShare.
    createSchedulingRequestExistingApplication(1024, 1, attId1);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application1 should have two running containers", 2, app1.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // App 1 finishes -> its AM share is released and app3's pending AM starts.
    AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(attId1, RMAppAttemptState.FINISHED, false);
    scheduler.update();
    scheduler.handle(appRemovedEvent1);
    scheduler.handle(updateEvent);
    assertEquals("Application1's AM should be finished", 0, app1.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app1.getResourceUsage());
    assertEquals("Application3's AM should be running", 1, app3.getLiveContainers().size());
    assertEquals("Application3's AM requests 1024 MB memory", 1024, app3.getAMResource().getMemorySize());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // Apps 4 and 5: 2GB AMs would push usage past the cap -> both held.
    ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
    createApplicationWithAMResource(attId4, "queue1", "user1", amResource2);
    createSchedulingRequestExistingApplication(2048, 2, amPriority, attId4);
    FSAppAttempt app4 = scheduler.getSchedulerApp(attId4);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application4's AM resource shouldn't be updated", 0, app4.getAMResource().getMemorySize());
    assertEquals("Application4's AM should not be running", 0, app4.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app4.getResourceUsage());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    ApplicationAttemptId attId5 = createAppAttemptId(5, 1);
    createApplicationWithAMResource(attId5, "queue1", "user1", amResource2);
    createSchedulingRequestExistingApplication(2048, 2, amPriority, attId5);
    FSAppAttempt app5 = scheduler.getSchedulerApp(attId5);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application5's AM resource shouldn't be updated", 0, app5.getAMResource().getMemorySize());
    assertEquals("Application5's AM should not be running", 0, app5.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app5.getResourceUsage());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // Killing app4 (AM never launched) releases nothing, so app5 still waits.
    AppAttemptRemovedSchedulerEvent appRemovedEvent4 = new AppAttemptRemovedSchedulerEvent(attId4, RMAppAttemptState.KILLED, false);
    scheduler.handle(appRemovedEvent4);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application5's AM should not be running", 0, app5.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app5.getResourceUsage());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // Apps 2 and 3 finish -> 2GB freed, allowing app5's 2GB AM to start.
    AppAttemptRemovedSchedulerEvent appRemovedEvent2 = new AppAttemptRemovedSchedulerEvent(attId2, RMAppAttemptState.FINISHED, false);
    AppAttemptRemovedSchedulerEvent appRemovedEvent3 = new AppAttemptRemovedSchedulerEvent(attId3, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent2);
    scheduler.handle(appRemovedEvent3);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application2's AM should be finished", 0, app2.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app2.getResourceUsage());
    assertEquals("Application3's AM should be finished", 0, app3.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app3.getResourceUsage());
    assertEquals("Application5's AM should be running", 1, app5.getLiveContainers().size());
    assertEquals("Application5's AM requests 2048 MB memory", 2048, app5.getAMResource().getMemorySize());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // Expire app5's AM container: it is removed, but the queue's AM usage is
    // retained so the restarted AM does not double-count against the cap.
    createSchedulingRequestExistingApplication(1024, 1, attId5);
    assertEquals("Application5's AM should have 1 container", 1, app5.getLiveContainers().size());
    RMContainer amContainer5 = (RMContainer) app5.getLiveContainers().toArray()[0];
    ContainerExpiredSchedulerEvent containerExpired = new ContainerExpiredSchedulerEvent(amContainer5.getContainerId());
    scheduler.handle(containerExpired);
    assertEquals("Application5's AM should have 0 container", 0, app5.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app5.getResourceUsage());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application5 should have 1 container", 1, app5.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // App 6: a 1860MB AM would nudge usage over the cap -> held.
    ApplicationAttemptId attId6 = createAppAttemptId(6, 1);
    createApplicationWithAMResource(attId6, "queue1", "user1", amResource3);
    createSchedulingRequestExistingApplication(1860, 2, amPriority, attId6);
    FSAppAttempt app6 = scheduler.getSchedulerApp(attId6);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application6's AM should not be running", 0, app6.getLiveContainers().size());
    assertEquals("Finished application usage should be none", Resources.none(), app6.getResourceUsage());
    assertEquals("Application6's AM resource shouldn't be updated", 0, app6.getAMResource().getMemorySize());
    assertEquals("Queue1's AM resource usage should be 2048 MB memory", 2048, queue1.getAmResourceUsage().getMemorySize());
    // Everything finishes -> AM usage drops back to zero.
    AppAttemptRemovedSchedulerEvent appRemovedEvent5 = new AppAttemptRemovedSchedulerEvent(attId5, RMAppAttemptState.FINISHED, false);
    AppAttemptRemovedSchedulerEvent appRemovedEvent6 = new AppAttemptRemovedSchedulerEvent(attId6, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent5);
    scheduler.handle(appRemovedEvent6);
    scheduler.update();
    assertEquals("Queue1's AM resource usage should be 0", 0, queue1.getAmResourceUsage().getMemorySize());
}
245305.481174hadoop
/**
 * Writes a fixed fixture of timeline entities under {@code rootDir} for the
 * file-system timeline reader tests: five "app" entities (with "id_1" split
 * across two files so the reader must merge them), two application attempts,
 * three containers, and one entity in a second app directory.
 *
 * @param rootDir root directory under which per-app entity files are created
 * @throws Exception if any entity file cannot be written
 */
private static void loadEntityData(String rootDir) throws Exception {
    File appDir = getAppDir(rootDir, "flow1", "app1", "app");
    // --- entity "id_1", first file (entity11) ---
    TimelineEntity entity11 = new TimelineEntity();
    entity11.setId("id_1");
    entity11.setType("app");
    entity11.setCreatedTime(1425016502000L);
    Map<String, Object> info1 = new HashMap<String, Object>();
    info1.put("info1", "val1");
    info1.put("info2", "val5");
    entity11.addInfo(info1);
    TimelineEvent event = new TimelineEvent();
    event.setId("event_1");
    event.setTimestamp(1425016502003L);
    entity11.addEvent(event);
    Set<TimelineMetric> metrics = new HashSet<TimelineMetric>();
    TimelineMetric metric1 = new TimelineMetric();
    metric1.setId("metric1");
    metric1.setType(TimelineMetric.Type.SINGLE_VALUE);
    metric1.addValue(1425016502006L, 113);
    metrics.add(metric1);
    TimelineMetric metric2 = new TimelineMetric();
    metric2.setId("metric2");
    metric2.setType(TimelineMetric.Type.TIME_SERIES);
    metric2.addValue(1425016502016L, 34);
    metrics.add(metric2);
    entity11.setMetrics(metrics);
    Map<String, String> configs = new HashMap<String, String>();
    configs.put("config_1", "127");
    entity11.setConfigs(configs);
    entity11.addRelatesToEntity("flow", "flow1");
    entity11.addIsRelatedToEntity("type1", "tid1_1");
    writeEntityFile(entity11, appDir);
    // --- entity "id_1", second file (entity12): the reader must merge the
    // configs/metrics/events from both files for the same entity id. ---
    TimelineEntity entity12 = new TimelineEntity();
    entity12.setId("id_1");
    entity12.setType("app");
    // NOTE(review): "configs" and "metrics" are the same mutable collections
    // already attached to entity11 above; clearing them here is safe only
    // because entity11 has already been written to disk.
    configs.clear();
    configs.put("config_2", "23");
    configs.put("config_3", "abc");
    entity12.addConfigs(configs);
    metrics.clear();
    TimelineMetric metric12 = new TimelineMetric();
    metric12.setId("metric2");
    metric12.setType(TimelineMetric.Type.TIME_SERIES);
    metric12.addValue(1425016502032L, 48);
    metric12.addValue(1425016502054L, 51);
    metrics.add(metric12);
    TimelineMetric metric3 = new TimelineMetric();
    metric3.setId("metric3");
    metric3.setType(TimelineMetric.Type.SINGLE_VALUE);
    metric3.addValue(1425016502060L, 23L);
    metrics.add(metric3);
    entity12.setMetrics(metrics);
    entity12.addIsRelatedToEntity("type1", "tid1_2");
    // NOTE(review): the trailing backtick in "tid2_1`" looks accidental --
    // confirm against the reader tests' expected relations before changing.
    entity12.addIsRelatedToEntity("type2", "tid2_1`");
    TimelineEvent event15 = new TimelineEvent();
    event15.setId("event_5");
    event15.setTimestamp(1425016502017L);
    entity12.addEvent(event15);
    writeEntityFile(entity12, appDir);
    // --- entity "id_2" ---
    TimelineEntity entity2 = new TimelineEntity();
    entity2.setId("id_2");
    entity2.setType("app");
    entity2.setCreatedTime(1425016501050L);
    Map<String, Object> info2 = new HashMap<String, Object>();
    // FIX(review): this previously wrote into info1 ("info1.put(...)"),
    // leaving the freshly created info2 map -- the one actually attached to
    // entity2 below -- empty. Populate info2 as clearly intended.
    info2.put("info2", 4);
    entity2.addInfo(info2);
    Map<String, String> configs2 = new HashMap<String, String>();
    configs2.put("config_1", "129");
    configs2.put("config_3", "def");
    entity2.setConfigs(configs2);
    TimelineEvent event2 = new TimelineEvent();
    event2.setId("event_2");
    event2.setTimestamp(1425016501003L);
    entity2.addEvent(event2);
    Set<TimelineMetric> metrics2 = new HashSet<TimelineMetric>();
    TimelineMetric metric21 = new TimelineMetric();
    metric21.setId("metric1");
    metric21.setType(TimelineMetric.Type.SINGLE_VALUE);
    metric21.addValue(1425016501006L, 300);
    metrics2.add(metric21);
    TimelineMetric metric22 = new TimelineMetric();
    metric22.setId("metric2");
    metric22.setType(TimelineMetric.Type.TIME_SERIES);
    metric22.addValue(1425016501056L, 31);
    metric22.addValue(1425016501084L, 70);
    metrics2.add(metric22);
    TimelineMetric metric23 = new TimelineMetric();
    metric23.setId("metric3");
    metric23.setType(TimelineMetric.Type.SINGLE_VALUE);
    metric23.addValue(1425016502060L, 23L);
    metrics2.add(metric23);
    entity2.setMetrics(metrics2);
    entity2.addRelatesToEntity("flow", "flow2");
    writeEntityFile(entity2, appDir);
    // --- entity "id_3" ---
    TimelineEntity entity3 = new TimelineEntity();
    entity3.setId("id_3");
    entity3.setType("app");
    entity3.setCreatedTime(1425016501050L);
    Map<String, Object> info3 = new HashMap<String, Object>();
    info3.put("info2", 3.5);
    info3.put("info4", 20);
    entity3.addInfo(info3);
    Map<String, String> configs3 = new HashMap<String, String>();
    configs3.put("config_1", "123");
    configs3.put("config_3", "abc");
    entity3.setConfigs(configs3);
    TimelineEvent event3 = new TimelineEvent();
    event3.setId("event_2");
    event3.setTimestamp(1425016501003L);
    entity3.addEvent(event3);
    TimelineEvent event4 = new TimelineEvent();
    event4.setId("event_4");
    event4.setTimestamp(1425016502006L);
    entity3.addEvent(event4);
    Set<TimelineMetric> metrics3 = new HashSet<TimelineMetric>();
    TimelineMetric metric31 = new TimelineMetric();
    metric31.setId("metric1");
    metric31.setType(TimelineMetric.Type.SINGLE_VALUE);
    metric31.addValue(1425016501006L, 124);
    metrics3.add(metric31);
    TimelineMetric metric32 = new TimelineMetric();
    metric32.setId("metric2");
    metric32.setType(TimelineMetric.Type.TIME_SERIES);
    metric32.addValue(1425016501056L, 31);
    metric32.addValue(1425016501084L, 74);
    metrics3.add(metric32);
    entity3.setMetrics(metrics3);
    entity3.addIsRelatedToEntity("type1", "tid1_2");
    writeEntityFile(entity3, appDir);
    // --- entity "id_4": minimal entity with a single event ---
    TimelineEntity entity4 = new TimelineEntity();
    entity4.setId("id_4");
    entity4.setType("app");
    entity4.setCreatedTime(1425016502050L);
    TimelineEvent event44 = new TimelineEvent();
    event44.setId("event_4");
    event44.setTimestamp(1425016502003L);
    entity4.addEvent(event44);
    writeEntityFile(entity4, appDir);
    // --- application attempts for flow1/app1 ---
    File attemptDir = getAppDir(rootDir, "flow1", "app1", TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString());
    ApplicationAttemptEntity attempt1 = new ApplicationAttemptEntity();
    attempt1.setId("app-attempt-1");
    attempt1.setCreatedTime(1425017502003L);
    writeEntityFile(attempt1, attemptDir);
    ApplicationAttemptEntity attempt2 = new ApplicationAttemptEntity();
    attempt2.setId("app-attempt-2");
    attempt2.setCreatedTime(1425017502004L);
    writeEntityFile(attempt2, attemptDir);
    // --- containers parented to the attempts above ---
    File entityDir = getAppDir(rootDir, "flow1", "app1", TimelineEntityType.YARN_CONTAINER.toString());
    ContainerEntity containerEntity1 = new ContainerEntity();
    containerEntity1.setId("container_1_1");
    containerEntity1.setParent(attempt1.getIdentifier());
    containerEntity1.setCreatedTime(1425017502003L);
    writeEntityFile(containerEntity1, entityDir);
    ContainerEntity containerEntity2 = new ContainerEntity();
    containerEntity2.setId("container_2_1");
    containerEntity2.setParent(attempt2.getIdentifier());
    containerEntity2.setCreatedTime(1425018502003L);
    writeEntityFile(containerEntity2, entityDir);
    ContainerEntity containerEntity3 = new ContainerEntity();
    containerEntity3.setId("container_2_2");
    containerEntity3.setParent(attempt2.getIdentifier());
    containerEntity3.setCreatedTime(1425018502003L);
    writeEntityFile(containerEntity3, entityDir);
    // --- second app dir; flow name contains a comma to exercise escaping ---
    File appDir2 = getAppDir(rootDir, "flow1,flow", "app2", "app");
    TimelineEntity entity5 = new TimelineEntity();
    entity5.setId("id_5");
    entity5.setType("app");
    entity5.setCreatedTime(1425016502050L);
    writeEntityFile(entity5, appDir2);
}
245571.411184kafka
/**
 * Verifies that when the rebalance timeout of the last member WITHOUT a
 * classic protocol (memberId2) expires, the member is fenced and the consumer
 * group is downgraded to a classic group containing only the remaining
 * classic-protocol member (memberId1), with the expected tombstone and
 * group-metadata records emitted and classic timers scheduled.
 *
 * NOTE(review): the record-index slicing below (subList ranges) mirrors the
 * exact emission order of the downgrade path; do not reorder the asserts.
 */
public void testLastConsumerProtocolMemberRebalanceTimeoutInConsumerGroup() {
    String groupId = "group-id";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    Uuid zarTopicId = Uuid.randomUuid();
    String zarTopicName = "zar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // memberId1 supports the classic "range" protocol; memberId2 does not.
    List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = Collections.singletonList(new ConsumerGroupMemberMetadataValue.ClassicProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1)))))));
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setRebalanceTimeoutMs(30000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(protocols)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")).setServerAssignorName("range").setRebalanceTimeoutMs(30000).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build();
    // DOWNGRADE policy allows the group to fall back to a classic group once
    // no consumer-protocol-only members remain.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE).withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 6).addTopic(barTopicId, barTopicName, 3).addTopic(zarTopicId, zarTopicName, 1).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withMember(member1).withMember(member2).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).withAssignmentEpoch(10)).build();
    context.replay(CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
            put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1, mkMapOfPartitionRacks(1)));
        }
    }));
    context.commit();
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5))));
        }
    }));
    // memberId2 heartbeats with a changed subscription (drops "zar"), which
    // triggers a rebalance and (re)schedules its rebalance timeout.
    context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(10).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(3, 4, 5)), new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(barTopicId).setPartitions(Arrays.asList(2)))));
    context.assertRebalanceTimeout(groupId, memberId2, 30000);
    // Advance past the 30s rebalance timeout so memberId2 is fenced.
    MockCoordinatorTimer.ExpiredTimeout<Void, CoordinatorRecord> timeout = context.sleep(30000 + 1).get(0);
    assertEquals(consumerGroupRebalanceTimeoutKey(groupId, memberId2), timeout.key);
    // After fencing, only memberId1 remains, so the group downgrades to a
    // classic group whose sole member keeps its serialized assignment.
    byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1)))));
    Map<String, byte[]> assignments = new HashMap<String, byte[]>() {

        {
            put(memberId1, assignment);
        }
    };
    ClassicGroup expectedClassicGroup = new ClassicGroup(new LogContext(), groupId, STABLE, context.time, context.metrics, 11, Optional.ofNullable(ConsumerProtocol.PROTOCOL_TYPE), Optional.ofNullable("range"), Optional.ofNullable(memberId1), Optional.of(context.time.milliseconds()));
    expectedClassicGroup.add(new ClassicGroupMember(memberId1, Optional.ofNullable(member1.instanceId()), member1.clientId(), member1.clientHost(), member1.rebalanceTimeoutMs(), member1.classicProtocolSessionTimeout().get(), ConsumerProtocol.PROTOCOL_TYPE, member1.supportedJoinGroupRequestProtocols(), assignment));
    // Expected downgrade records: per-member current/target assignment
    // tombstones, target epoch tombstone, member subscription tombstones,
    // subscription-metadata + group-epoch tombstones, then the classic
    // group-metadata record.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newCurrentAssignmentTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newCurrentAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newTargetAssignmentTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newTargetAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newTargetAssignmentEpochTombstoneRecord(groupId), CoordinatorRecordHelpers.newMemberSubscriptionTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newMemberSubscriptionTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newGroupSubscriptionMetadataTombstoneRecord(groupId), CoordinatorRecordHelpers.newGroupEpochTombstoneRecord(groupId), CoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()));
    // Per-member tombstone pairs may be emitted in either member order.
    assertUnorderedListEquals(expectedRecords.subList(0, 2), timeout.result.records().subList(0, 2));
    assertUnorderedListEquals(expectedRecords.subList(2, 4), timeout.result.records().subList(2, 4));
    assertRecordEquals(expectedRecords.get(4), timeout.result.records().get(4));
    assertUnorderedListEquals(expectedRecords.subList(5, 7), timeout.result.records().subList(5, 7));
    assertRecordsEquals(expectedRecords.subList(7, 10), timeout.result.records().subList(7, 10));
    // Metrics must reflect the consumer->classic state transition.
    verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.RECONCILING, null);
    verify(context.metrics, times(1)).onClassicGroupStateTransition(null, STABLE);
    // Classic-group timers (member heartbeat + join) must now be scheduled.
    ScheduledTimeout<Void, CoordinatorRecord> heartbeatTimeout = context.timer.timeout(classicGroupHeartbeatKey(groupId, memberId1));
    assertNotNull(heartbeatTimeout);
    ScheduledTimeout<Void, CoordinatorRecord> groupJoinTimeout = context.timer.timeout(classicGroupJoinKey(groupId));
    assertNotNull(groupJoinTimeout);
    ClassicGroup classicGroup = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
    assertTrue(classicGroup.isInState(PREPARING_REBALANCE));
}
243176.0742117wildfly
/**
 * Splits the raw command line into server-side options and application-client arguments.
 * <p>
 * Server options (version/help, properties files, host URL or connection-properties file,
 * {@code -D} system properties, the appclient config file name, and the optional YAML
 * configuration-extension file) are consumed here. The first argument that is neither a
 * recognized option nor prefixed with {@code -} flips the loop into client mode, after
 * which every remaining argument is passed through to the client untouched.
 *
 * @param args              the raw command-line arguments
 * @param systemProperties  properties object populated from {@code -D} options (and passed
 *                          to the resulting {@code ServerEnvironment})
 * @param systemEnvironment the process environment, handed to the {@code ServerEnvironment}
 * @param launchType        launch type recorded in the resulting {@code ServerEnvironment}
 * @return the parsed options, or {@code null} when startup should not proceed (version or
 *         help was printed, or an argument was unknown/invalid/missing its value)
 */
public static ParsedOptions determineEnvironment(String[] args, Properties systemProperties, Map<String, String> systemEnvironment, ServerEnvironment.LaunchType launchType) {
    List<String> clientArguments = new ArrayList<>();
    ParsedOptions ret = new ParsedOptions();
    ret.clientArguments = clientArguments;
    final int argsLength = args.length;
    // default config file name, overridable via the appclient-config option
    String appClientConfig = "appclient.xml";
    // once true, all remaining arguments are forwarded to the client verbatim
    boolean clientArgs = false;
    ProductConfig productConfig;
    // tracks that a host URL was given, to enforce mutual exclusion with a properties file
    boolean hostSet = false;
    String yamlFile = null;
    for (int i = 0; i < argsLength; i++) {
        final String arg = args[i];
        try {
            if (clientArgs) {
                clientArguments.add(arg);
            } else if (CommandLineConstants.VERSION.equals(arg) || CommandLineConstants.SHORT_VERSION.equals(arg) || CommandLineConstants.OLD_VERSION.equals(arg) || CommandLineConstants.OLD_SHORT_VERSION.equals(arg)) {
                // print version and abort startup
                productConfig = ProductConfig.fromFilesystemSlot(Module.getBootModuleLoader(), WildFlySecurityManager.getPropertyPrivileged(ServerEnvironment.HOME_DIR, null), null);
                STDOUT.println(productConfig.getPrettyVersionString());
                return null;
            } else if (CommandLineConstants.HELP.equals(arg) || CommandLineConstants.SHORT_HELP.equals(arg) || CommandLineConstants.OLD_HELP.equals(arg)) {
                usage();
                return null;
            } else if (CommandLineConstants.PROPERTIES.equals(arg) || CommandLineConstants.OLD_PROPERTIES.equals(arg) || CommandLineConstants.SHORT_PROPERTIES.equals(arg)) {
                // value is the next argument; ++i consumes it (missing value ->
                // IndexOutOfBoundsException, reported by the catch below)
                if (!processProperties(arg, args[++i])) {
                    return null;
                }
            } else if (arg.startsWith(CommandLineConstants.PROPERTIES)) {
                // --properties=<url> form
                String urlSpec = parseValue(arg, CommandLineConstants.PROPERTIES);
                if (urlSpec == null || !processProperties(arg, urlSpec)) {
                    return null;
                }
            } else if (arg.startsWith(CommandLineConstants.SHORT_PROPERTIES)) {
                String urlSpec = parseValue(arg, CommandLineConstants.SHORT_PROPERTIES);
                if (urlSpec == null || !processProperties(arg, urlSpec)) {
                    return null;
                }
            } else if (arg.startsWith(CommandLineConstants.OLD_PROPERTIES)) {
                String urlSpec = parseValue(arg, CommandLineConstants.OLD_PROPERTIES);
                if (urlSpec == null || !processProperties(arg, urlSpec)) {
                    return null;
                }
            } else if (arg.equals(CommandLineConstants.SHORT_HOST) || arg.equals(CommandLineConstants.HOST)) {
                // a host URL and a connection-properties file are mutually exclusive
                if (ret.propertiesFile != null) {
                    throw AppClientLogger.ROOT_LOGGER.cannotSpecifyBothHostAndPropertiesFile();
                }
                hostSet = true;
                ret.hostUrl = args[++i];
            } else if (arg.startsWith(CommandLineConstants.SHORT_HOST)) {
                if (ret.propertiesFile != null) {
                    throw AppClientLogger.ROOT_LOGGER.cannotSpecifyBothHostAndPropertiesFile();
                }
                hostSet = true;
                ret.hostUrl = parseValue(arg, CommandLineConstants.SHORT_HOST);
            } else if (arg.startsWith(CommandLineConstants.HOST)) {
                if (ret.propertiesFile != null) {
                    throw AppClientLogger.ROOT_LOGGER.cannotSpecifyBothHostAndPropertiesFile();
                }
                hostSet = true;
                ret.hostUrl = parseValue(arg, CommandLineConstants.HOST);
            } else if (arg.startsWith(CommandLineConstants.CONNECTION_PROPERTIES)) {
                if (hostSet) {
                    throw AppClientLogger.ROOT_LOGGER.cannotSpecifyBothHostAndPropertiesFile();
                }
                ret.propertiesFile = parseValue(arg, CommandLineConstants.CONNECTION_PROPERTIES);
            } else if (arg.startsWith(CommandLineConstants.SYS_PROP)) {
                // -Dname or -Dname=value; a bare -Dname defaults to "true"
                String name, value;
                int idx = arg.indexOf("=");
                if (idx == -1) {
                    name = arg.substring(2);
                    value = "true";
                } else {
                    name = arg.substring(2, idx);
                    value = arg.substring(idx + 1);
                }
                // record in both the caller's Properties and the JVM's system properties
                systemProperties.setProperty(name, value);
                WildFlySecurityManager.setPropertyPrivileged(name, value);
            } else if (arg.startsWith(CommandLineConstants.APPCLIENT_CONFIG)) {
                appClientConfig = parseValue(arg, CommandLineConstants.APPCLIENT_CONFIG);
            } else if (CommandLineConstants.SECMGR.equals(arg)) {
                // intentionally ignored: recognized here so it is not treated as an unknown
                // option or as the first client argument; presumably acted on by the
                // launch scripts — TODO confirm
            } else if (ConfigurationExtensionFactory.isConfigurationExtensionSupported() && ConfigurationExtensionFactory.commandLineContainsArgument(arg)) {
                // YAML config-extension file: value given either as "arg=value" or as the
                // next argument
                int idx = arg.indexOf("=");
                if (idx == -1) {
                    final int next = i + 1;
                    if (next < argsLength) {
                        yamlFile = args[next];
                        i++;
                    } else {
                        STDERR.println(AppClientLogger.ROOT_LOGGER.argumentExpected(arg));
                        usage();
                        return null;
                    }
                } else {
                    yamlFile = arg.substring(idx + 1);
                }
            } else {
                // unknown dash-option is an error; anything else starts the client args
                if (arg.startsWith("-")) {
                    STDOUT.println(AppClientLogger.ROOT_LOGGER.unknownOption(arg));
                    usage();
                    return null;
                }
                clientArgs = true;
                clientArguments.add(arg);
            }
        } catch (IndexOutOfBoundsException e) {
            // raised by args[++i] when an option's required value is missing
            STDERR.println(AppClientLogger.ROOT_LOGGER.argumentExpected(arg));
            usage();
            return null;
        }
    }
    String hostControllerName = null;
    // rebuild the product config with the (possibly -D-augmented) system properties
    productConfig = ProductConfig.fromFilesystemSlot(Module.getBootModuleLoader(), WildFlySecurityManager.getPropertyPrivileged(ServerEnvironment.HOME_DIR, null), systemProperties);
    ret.environment = new ServerEnvironment(hostControllerName, systemProperties, systemEnvironment, appClientConfig, null, launchType, null, productConfig, System.currentTimeMillis(), false, false, null, null, null, yamlFile);
    return ret;
}
242390.2739140wildfly
/**
 * Parses a {@code <remote-cache-container>} element into an ADD operation plus operations
 * for its child resources.
 * <p>
 * Attribute and element cases that are gated on a schema version use a deliberate switch
 * fall-through: when the current schema predates the feature, the missing {@code break}
 * lets control cascade through subsequent (also-failing) version checks into
 * {@code default}, which rejects the attribute/element as unexpected.
 *
 * @param reader           the XML stream positioned on the remote-cache-container element
 * @param subsystemAddress address of the Infinispan subsystem resource
 * @param operations       output map collecting the generated ADD operations, keyed by address
 * @throws XMLStreamException on malformed XML or on attributes/elements not allowed by
 *         the schema version being parsed
 */
private void parseRemoteContainer(XMLExtendedStreamReader reader, PathAddress subsystemAddress, Map<PathAddress, ModelNode> operations) throws XMLStreamException {
    String name = require(reader, XMLAttribute.NAME);
    PathAddress address = subsystemAddress.append(RemoteCacheContainerResourceDefinition.pathElement(name));
    ModelNode operation = Util.createAddOperation(address);
    operations.put(address, operation);
    for (int i = 0; i < reader.getAttributeCount(); i++) {
        ParseUtils.requireNoNamespaceAttribute(reader, i);
        XMLAttribute attribute = XMLAttribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case NAME:
                {
                    // already consumed via require(...) to build the resource address
                    break;
                }
            case CONNECTION_TIMEOUT:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.CONNECTION_TIMEOUT);
                    break;
                }
            case DEFAULT_REMOTE_CLUSTER:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.DEFAULT_REMOTE_CLUSTER);
                    break;
                }
            case KEY_SIZE_ESTIMATE:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.DeprecatedAttribute.KEY_SIZE_ESTIMATE);
                    break;
                }
            case MAX_RETRIES:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.MAX_RETRIES);
                    break;
                }
            case MODULE:
                {
                    // singular "module" was replaced by "modules" in schema 12.0
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_12_0)) {
                        throw ParseUtils.unexpectedAttribute(reader, i);
                    }
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.ListAttribute.MODULES);
                    break;
                }
            case PROTOCOL_VERSION:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.PROTOCOL_VERSION);
                    break;
                }
            case SOCKET_TIMEOUT:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.SOCKET_TIMEOUT);
                    break;
                }
            case TCP_NO_DELAY:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.TCP_NO_DELAY);
                    break;
                }
            case TCP_KEEP_ALIVE:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.TCP_KEEP_ALIVE);
                    break;
                }
            case VALUE_SIZE_ESTIMATE:
                {
                    readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.DeprecatedAttribute.VALUE_SIZE_ESTIMATE);
                    break;
                }
            case STATISTICS_ENABLED:
                {
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_9_0)) {
                        readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.STATISTICS_ENABLED);
                        break;
                    }
                    // deliberate fall-through on pre-9.0 schemas: cascades to default -> unexpectedAttribute
                }
            case MODULES:
                {
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_12_0)) {
                        readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.ListAttribute.MODULES);
                        break;
                    }
                    // deliberate fall-through on pre-12.0 schemas
                }
            case MARSHALLER:
                {
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_13_0)) {
                        readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.MARSHALLER);
                        break;
                    }
                    // deliberate fall-through on pre-13.0 schemas
                }
            case TRANSACTION_TIMEOUT:
                {
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_13_0)) {
                        readAttribute(reader, i, operation, RemoteCacheContainerResourceDefinition.Attribute.TRANSACTION_TIMEOUT);
                        break;
                    }
                    // deliberate fall-through on pre-13.0 schemas
                }
            default:
                {
                    throw ParseUtils.unexpectedAttribute(reader, i);
                }
        }
    }
    // pre-14.0 configs default to the legacy marshaller when none was specified
    if (!operation.hasDefined(CacheContainerResourceDefinition.Attribute.MARSHALLER.getName())) {
        if (!this.schema.since(InfinispanSubsystemSchema.VERSION_14_0)) {
            operation.get(CacheContainerResourceDefinition.Attribute.MARSHALLER.getName()).set(new ModelNode(InfinispanMarshallerFactory.LEGACY.name()));
        }
    }
    addRequiredChildOperations(address, RemoteCacheContainerResourceDefinition.REQUIRED_CHILDREN, operations);
    // child elements, until this container's END_ELEMENT
    while (reader.hasNext() && (reader.nextTag() != XMLStreamConstants.END_ELEMENT)) {
        XMLElement element = XMLElement.forName(reader.getLocalName());
        switch(element) {
            case ASYNC_THREAD_POOL:
                {
                    this.parseThreadPool(ThreadPoolResourceDefinition.CLIENT, reader, address, operations);
                    break;
                }
            case CONNECTION_POOL:
                {
                    this.parseConnectionPool(reader, address, operations);
                    break;
                }
            case INVALIDATION_NEAR_CACHE:
                {
                    // dropped in 14.0; older schemas accept but ignore it
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_14_0)) {
                        throw ParseUtils.unexpectedElement(reader);
                    }
                    ClusteringLogger.ROOT_LOGGER.elementIgnored(reader.getLocalName());
                    ParseUtils.requireNoContent(reader);
                    break;
                }
            case REMOTE_CLUSTERS:
                {
                    this.parseRemoteClusters(reader, address, operations);
                    break;
                }
            case SECURITY:
                {
                    this.parseRemoteCacheContainerSecurity(reader, address, operations);
                    break;
                }
            case TRANSACTION:
                {
                    // rejected from 14.0; ignored on 8.0-13.x
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_14_0)) {
                        throw ParseUtils.unexpectedElement(reader);
                    }
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_8_0)) {
                        ClusteringLogger.ROOT_LOGGER.elementIgnored(reader.getLocalName());
                        ParseUtils.requireNoContent(reader);
                        break;
                    }
                    // deliberate fall-through on pre-8.0 schemas: cascades to default -> unexpectedElement
                }
            case PROPERTY:
                {
                    // supported in 9.1-9.x and again from 11.0 onward
                    if (this.schema.since(InfinispanSubsystemSchema.VERSION_11_0) || (this.schema.since(InfinispanSubsystemSchema.VERSION_9_1) && !this.schema.since(InfinispanSubsystemSchema.VERSION_10_0))) {
                        ParseUtils.requireSingleAttribute(reader, XMLAttribute.NAME.getLocalName());
                        readElement(reader, operation, RemoteCacheContainerResourceDefinition.Attribute.PROPERTIES);
                        break;
                    }
                    // deliberate fall-through on unsupported schema versions
                }
            default:
                {
                    throw ParseUtils.unexpectedElement(reader);
                }
        }
    }
}
242562.2132155wildfly
/**
 * Parses an {@code <xa-datasource>} element (datasources schema 1.0) into an ADD operation
 * plus one ADD operation per {@code <xa-datasource-property>}.
 * <p>
 * Operations are only appended to {@code list} when the matching end tag of the
 * xa-datasource element is reached, so a parse failure adds nothing. The main ADD
 * operation is added before the property operations, preserving resource-creation order.
 *
 * @param reader        the XML stream positioned on the xa-datasource element
 * @param list          output list receiving the generated operations
 * @param parentAddress address of the datasources subsystem resource
 * @throws XMLStreamException on malformed XML
 * @throws ParserException    on an unexpected tag or premature end of document
 * @throws ValidateException  if an attribute/element value fails validation
 */
private void parseXADataSource_1_0(XMLExtendedStreamReader reader, final List<ModelNode> list, final ModelNode parentAddress) throws XMLStreamException, ParserException, ValidateException {
    String poolName = null;
    final ModelNode operation = new ModelNode();
    operation.get(OP).set(ADD);
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        if (!isNoNamespaceAttribute(reader, i)) {
            throw unexpectedAttribute(reader, i);
        }
        final XaDataSource.Attribute attribute = XaDataSource.Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case ENABLED:
                {
                    final String value = rawAttributeText(reader, ENABLED.getXmlName());
                    if (value != null) {
                        ENABLED.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case JNDI_NAME:
                {
                    final String jndiName = rawAttributeText(reader, JNDI_NAME.getXmlName());
                    JNDI_NAME.parseAndSetParameter(jndiName, operation, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // pool name becomes the resource address element, not an operation attribute
                    poolName = rawAttributeText(reader, POOLNAME_NAME);
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    final String value = rawAttributeText(reader, USE_JAVA_CONTEXT.getXmlName());
                    if (value != null) {
                        USE_JAVA_CONTEXT.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case SPY:
                {
                    final String value = rawAttributeText(reader, SPY.getXmlName());
                    if (value != null) {
                        SPY.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            case USE_CCM:
                {
                    final String value = rawAttributeText(reader, USE_CCM.getXmlName());
                    if (value != null) {
                        USE_CCM.parseAndSetParameter(value, operation, reader);
                    }
                    break;
                }
            default:
                throw ParseUtils.unexpectedAttribute(reader, i);
        }
    }
    // build and freeze the xa-datasource resource address from the pool name
    final ModelNode dsAddress = parentAddress.clone();
    dsAddress.add(XA_DATASOURCE, poolName);
    dsAddress.protect();
    operation.get(OP_ADDR).set(dsAddress);
    // property ADD operations are buffered and emitted after the main ADD operation
    List<ModelNode> xadatasourcePropertiesOperations = new ArrayList<ModelNode>(0);
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (DataSources.Tag.forName(reader.getLocalName()) == DataSources.Tag.XA_DATASOURCE) {
                        // end of this xa-datasource: commit all operations and finish
                        list.add(operation);
                        list.addAll(xadatasourcePropertiesOperations);
                        return;
                    } else {
                        // any end tag other than one of this element's known children is an error
                        if (XaDataSource.Tag.forName(reader.getLocalName()) == XaDataSource.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(XaDataSource.Tag.forName(reader.getLocalName())) {
                        case XA_DATASOURCE_PROPERTY:
                            {
                                // each property becomes its own ADD operation under the datasource
                                String name = rawAttributeText(reader, "name");
                                String value = rawElementText(reader);
                                final ModelNode configOperation = new ModelNode();
                                configOperation.get(OP).set(ADD);
                                final ModelNode configAddress = dsAddress.clone();
                                configAddress.add(XADATASOURCE_PROPERTIES.getName(), name);
                                configAddress.protect();
                                configOperation.get(OP_ADDR).set(configAddress);
                                XADATASOURCE_PROPERTY_VALUE.parseAndSetParameter(value, configOperation, reader);
                                xadatasourcePropertiesOperations.add(configOperation);
                                break;
                            }
                        case XA_DATASOURCE_CLASS:
                            {
                                String value = rawElementText(reader);
                                XA_DATASOURCE_CLASS.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DRIVER:
                            {
                                String value = rawElementText(reader);
                                DATASOURCE_DRIVER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case XA_POOL:
                            {
                                parseXaPool(reader, operation);
                                break;
                            }
                        case NEW_CONNECTION_SQL:
                            {
                                String value = rawElementText(reader);
                                NEW_CONNECTION_SQL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_DELIMITER:
                            {
                                String value = rawElementText(reader);
                                URL_DELIMITER.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_PROPERTY:
                            {
                                String value = rawElementText(reader);
                                URL_PROPERTY.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case URL_SELECTOR_STRATEGY_CLASS_NAME:
                            {
                                String value = rawElementText(reader);
                                URL_SELECTOR_STRATEGY_CLASS_NAME.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case TRANSACTION_ISOLATION:
                            {
                                String value = rawElementText(reader);
                                TRANSACTION_ISOLATION.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case SECURITY:
                            {
                                parseDsSecurity(reader, operation);
                                break;
                            }
                        case STATEMENT:
                            {
                                parseStatementSettings(reader, operation);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOutSettings(reader, operation);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidationSettings(reader, operation);
                                break;
                            }
                        case RECOVERY:
                            {
                                parseRecovery(reader, operation);
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    // only reachable if the document ends before the closing xa-datasource tag
    throw new ParserException(bundle.unexpectedEndOfDocument());
}
242172.5936145wildfly
/**
 * Parses a {@code <connection-definition>} element (resource-adapters schema 4.0) into an
 * ADD operation, registered in {@code map} under its pool name, plus any config properties
 * collected in {@code configMap}.
 * <p>
 * When no pool name is given, it is derived from the JNDI name: the segment after the last
 * {@code '/'} if present, otherwise after the last {@code ':'}. The operation is only
 * committed to {@code map} when the element's end tag is reached.
 *
 * @param reader    the XML stream positioned on the connection-definition element
 * @param map       output map of pool name to connection-definition ADD operation
 * @param configMap output map of pool name to its config-property nodes
 * @param isXa      whether the enclosing resource adapter is XA (selects xa-pool vs pool)
 * @throws XMLStreamException on malformed XML
 * @throws ParserException    on an unexpected tag or a duplicate pool definition
 * @throws ValidateException  if an attribute/element value fails validation
 */
protected void parseConnectionDefinitions_4_0(final XMLExtendedStreamReader reader, final Map<String, ModelNode> map, final Map<String, HashMap<String, ModelNode>> configMap, final boolean isXa) throws XMLStreamException, ParserException, ValidateException {
    final ModelNode connectionDefinitionNode = new ModelNode();
    connectionDefinitionNode.get(OP).set(ADD);
    String poolName = null;
    String jndiName = null;
    int attributeSize = reader.getAttributeCount();
    // was `Boolean.FALSE` — a plain primitive literal avoids the pointless unboxing
    boolean poolDefined = false;
    for (int i = 0; i < attributeSize; i++) {
        ConnectionDefinition.Attribute attribute = ConnectionDefinition.Attribute.forName(reader.getAttributeLocalName(i));
        String value = reader.getAttributeValue(i);
        switch(attribute) {
            case ENABLED:
                {
                    ENABLED.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case CONNECTABLE:
                {
                    CONNECTABLE.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case TRACKING:
                {
                    TRACKING.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case JNDI_NAME:
                {
                    // kept for the pool-name fallback below
                    jndiName = value;
                    JNDI_NAME.parseAndSetParameter(jndiName, connectionDefinitionNode, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // pool name keys the output maps; it is not an operation attribute
                    poolName = value;
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    USE_JAVA_CONTEXT.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case USE_CCM:
                {
                    USE_CCM.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case SHARABLE:
                {
                    SHARABLE.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case ENLISTMENT:
                {
                    ENLISTMENT.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case CLASS_NAME:
                {
                    CLASS_NAME.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case MCP:
                {
                    MCP.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case ENLISTMENT_TRACE:
                ENLISTMENT_TRACE.parseAndSetParameter(value, connectionDefinitionNode, reader);
                break;
            default:
                throw ParseUtils.unexpectedAttribute(reader, i);
        }
    }
    if (poolName == null || poolName.trim().isEmpty()) {
        if (jndiName != null && !jndiName.trim().isEmpty()) {
            // derive the pool name from the last JNDI segment ('/' preferred over ':')
            if (jndiName.contains("/")) {
                poolName = jndiName.substring(jndiName.lastIndexOf("/") + 1);
            } else {
                poolName = jndiName.substring(jndiName.lastIndexOf(":") + 1);
            }
        } else {
            // neither a pool name nor a JNDI name to derive it from
            throw ParseUtils.missingRequired(reader, EnumSet.of(ConnectionDefinition.Attribute.JNDI_NAME));
        }
    }
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (Activation.Tag.forName(reader.getLocalName()) == Activation.Tag.CONNECTION_DEFINITION) {
                        // end of this definition: commit the operation and finish
                        map.put(poolName, connectionDefinitionNode);
                        return;
                    } else {
                        // any end tag other than one of this element's known children is an error
                        if (ConnectionDefinition.Tag.forName(reader.getLocalName()) == ConnectionDefinition.Tag.UNKNOWN) {
                            throw ParseUtils.unexpectedEndElement(reader);
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(ConnectionDefinition.Tag.forName(reader.getLocalName())) {
                        case CONFIG_PROPERTY:
                            {
                                // computeIfAbsent replaces the containsKey/put/get sequence
                                parseConfigProperties(reader, configMap.computeIfAbsent(poolName, k -> new HashMap<>()));
                                break;
                            }
                        case SECURITY:
                            {
                                parseSecuritySettings(reader, connectionDefinitionNode);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOut(reader, connectionDefinitionNode);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidation(reader, connectionDefinitionNode);
                                break;
                            }
                        case XA_POOL:
                            {
                                // xa-pool is only valid for XA adapters, and at most once
                                if (!isXa) {
                                    throw ParseUtils.unexpectedElement(reader);
                                }
                                if (poolDefined) {
                                    throw new ParserException(bundle.multiplePools());
                                }
                                parseXaPool(reader, connectionDefinitionNode);
                                poolDefined = true;
                                break;
                            }
                        case POOL:
                            {
                                // plain pool is only valid for non-XA adapters, and at most once
                                if (isXa) {
                                    throw ParseUtils.unexpectedElement(reader);
                                }
                                if (poolDefined) {
                                    throw new ParserException(bundle.multiplePools());
                                }
                                parsePool(reader, connectionDefinitionNode);
                                poolDefined = true;
                                break;
                            }
                        case RECOVERY:
                            {
                                parseRecovery(reader, connectionDefinitionNode);
                                break;
                            }
                        default:
                            throw ParseUtils.unexpectedElement(reader);
                    }
                    break;
                }
        }
    }
    // only reachable if the document ends before the closing connection-definition tag
    throw ParseUtils.unexpectedEndElement(reader);
}
244265.4722142wildfly
/**
 * Assembles the resource-adapter configuration for this external pooled connection
 * factory and installs its activation services.
 *
 * Builds the RA config properties from the statically configured connectors (factory
 * class names and their key=value params) and, if present, the discovery group
 * (UDP or JGroups command-dispatcher based). Routes adapter params to the general,
 * inbound or outbound property sets, guarantees a reconnect-attempts default, then
 * installs a PASSIVE {@code ResourceAdapterActivatorService} plus a placeholder
 * deployer service.
 *
 * @param serviceTarget target used to install the new services
 * @param container     service container published to {@code WildFlyRecoveryRegistry}
 * @throws Exception if connector binding processing or service creation fails
 */
private void createService(ServiceTarget serviceTarget, ServiceContainer container) throws Exception {
    // NOTE(review): the original declared two InputStream locals (is/isIj) that were
    // never assigned and "closed" them in a finally block wrapping the whole method.
    // That was provably dead code (both always null) and has been removed.
    List<ConfigProperty> properties = new ArrayList<ConfigProperty>();
    // Comma-separated, index-aligned lists: one factory class name and one param
    // group per connector; params within a connector are joined with ';'.
    StringBuilder connectorClassname = new StringBuilder();
    StringBuilder connectorParams = new StringBuilder();
    TransportConfigOperationHandlers.processConnectorBindings(Arrays.asList(connectors), socketBindings, outboundSocketBindings);
    for (TransportConfiguration tc : connectors) {
        if (tc == null) {
            throw MessagingLogger.ROOT_LOGGER.connectorNotDefined("null");
        }
        if (connectorClassname.length() > 0) {
            connectorClassname.append(",");
            connectorParams.append(",");
        }
        connectorClassname.append(tc.getFactoryClassName());
        Map<String, Object> params = tc.getParams();
        boolean multiple = false;
        for (Map.Entry<String, Object> entry : params.entrySet()) {
            if (multiple) {
                connectorParams.append(";");
            }
            connectorParams.append(entry.getKey()).append("=").append(entry.getValue());
            multiple = true;
        }
    }
    if (connectorClassname.length() > 0) {
        properties.add(simpleProperty15(CONNECTOR_CLASSNAME, STRING_TYPE, connectorClassname.toString()));
    }
    if (connectorParams.length() > 0) {
        properties.add(simpleProperty15(CONNECTION_PARAMETERS, STRING_TYPE, connectorParams.toString()));
    }
    if (discoveryGroupConfiguration != null) {
        final String dgName = discoveryGroupConfiguration.getName();
        final String key = "discovery" + dgName;
        final DiscoveryGroupConfiguration config;
        if (commandDispatcherFactories.containsKey(key)) {
            // JGroups-backed discovery: rebuild the config around the command dispatcher.
            BroadcastCommandDispatcherFactory commandDispatcherFactory = commandDispatcherFactories.get(key).get();
            String clusterName = clusterNames.get(key);
            config = JGroupsDiscoveryGroupAdd.createDiscoveryGroupConfiguration(name, discoveryGroupConfiguration, commandDispatcherFactory, clusterName);
        } else {
            // Socket-binding-backed discovery; also registers the managed binding.
            final SocketBinding binding = groupBindings.get(key).get();
            if (binding == null) {
                throw MessagingLogger.ROOT_LOGGER.failedToFindDiscoverySocketBinding(dgName);
            }
            config = SocketDiscoveryGroupAdd.createDiscoveryGroupConfiguration(name, discoveryGroupConfiguration, binding);
            binding.getSocketBindings().getNamedRegistry().registerBinding(ManagedBinding.Factory.createSimpleManagedBinding(binding));
        }
        BroadcastEndpointFactory bgCfg = config.getBroadcastEndpointFactory();
        if (bgCfg instanceof UDPBroadcastEndpointFactory) {
            UDPBroadcastEndpointFactory udpCfg = (UDPBroadcastEndpointFactory) bgCfg;
            properties.add(simpleProperty15(GROUP_ADDRESS, STRING_TYPE, udpCfg.getGroupAddress()));
            properties.add(simpleProperty15(GROUP_PORT, INTEGER_TYPE, "" + udpCfg.getGroupPort()));
            properties.add(simpleProperty15(DISCOVERY_LOCAL_BIND_ADDRESS, STRING_TYPE, "" + udpCfg.getLocalBindAddress()));
        } else if (bgCfg instanceof CommandDispatcherBroadcastEndpointFactory) {
            String external = "/" + name + ":discovery" + dgName;
            properties.add(simpleProperty15(JGROUPS_CHANNEL_NAME, STRING_TYPE, jgroupsClusterName));
            properties.add(simpleProperty15(JGROUPS_CHANNEL_REF_NAME, STRING_TYPE, external));
        }
        properties.add(simpleProperty15(DISCOVERY_INITIAL_WAIT_TIMEOUT, LONG_TYPE, "" + config.getDiscoveryInitialWaitTimeout()));
        properties.add(simpleProperty15(REFRESH_TIMEOUT, LONG_TYPE, "" + config.getRefreshTimeout()));
    }
    // Split adapter params into general / inbound / outbound sets. The
    // rebalance-connections flag is special-cased: it only goes inbound, and only
    // when enabled.
    boolean hasReconnect = false;
    final List<ConfigProperty> inboundProperties = new ArrayList<>();
    final List<ConfigProperty> outboundProperties = new ArrayList<>();
    final String reconnectName = ConnectionFactoryAttributes.Pooled.RECONNECT_ATTEMPTS_PROP_NAME;
    for (PooledConnectionFactoryConfigProperties adapterParam : adapterParams) {
        hasReconnect |= reconnectName.equals(adapterParam.getName());
        ConfigProperty p = simpleProperty15(adapterParam.getName(), adapterParam.getType(), adapterParam.getValue());
        if (adapterParam.getName().equals(REBALANCE_CONNECTIONS_PROP_NAME)) {
            boolean rebalanceConnections = Boolean.parseBoolean(adapterParam.getValue());
            if (rebalanceConnections) {
                inboundProperties.add(p);
            }
        } else {
            if (null == adapterParam.getConfigType()) {
                properties.add(p);
            } else {
                switch(adapterParam.getConfigType()) {
                    case INBOUND:
                        inboundProperties.add(p);
                        break;
                    case OUTBOUND:
                        outboundProperties.add(p);
                        break;
                    default:
                        properties.add(p);
                        break;
                }
            }
        }
    }
    // Ensure a reconnect-attempts value is always present.
    if (!hasReconnect) {
        properties.add(simpleProperty15(reconnectName, Integer.class.getName(), DEFAULT_MAX_RECONNECTS));
    }
    configureCredential(properties);
    WildFlyRecoveryRegistry.container = container;
    // Build the JCA 1.5 metadata graph and the activator service around it.
    OutboundResourceAdapter outbound = createOutbound(outboundProperties);
    InboundResourceAdapter inbound = createInbound(inboundProperties);
    ResourceAdapter ra = createResourceAdapter15(properties, outbound, inbound);
    Connector cmd = createConnector15(ra);
    TransactionSupportEnum transactionSupport = getTransactionSupport(txSupport);
    ConnectionDefinition common = createConnDef(transactionSupport, bindInfo.getBindName(), minPoolSize, maxPoolSize, managedConnectionPoolClassName, enlistmentTrace);
    Activation activation = createActivation(common, transactionSupport);
    ResourceAdapterActivatorService activator = new ResourceAdapterActivatorService(cmd, activation, ExternalPooledConnectionFactoryService.class.getClassLoader(), name);
    activator.setBindInfo(bindInfo);
    activator.setCreateBinderService(createBinderService);
    activator.addJndiAliases(jndiAliases);
    final ServiceBuilder sb = Services.addServerExecutorDependency(serviceTarget.addService(getResourceAdapterActivatorsServiceName(name), activator), activator.getExecutorServiceInjector()).addDependency(ConnectorServices.IRONJACAMAR_MDR, AS7MetadataRepository.class, activator.getMdrInjector()).addDependency(ConnectorServices.RA_REPOSITORY_SERVICE, ResourceAdapterRepository.class, activator.getRaRepositoryInjector()).addDependency(ConnectorServices.MANAGEMENT_REPOSITORY_SERVICE, ManagementRepository.class, activator.getManagementRepositoryInjector()).addDependency(ConnectorServices.RESOURCE_ADAPTER_REGISTRY_SERVICE, ResourceAdapterDeploymentRegistry.class, activator.getRegistryInjector()).addDependency(ConnectorServices.TRANSACTION_INTEGRATION_SERVICE, TransactionIntegration.class, activator.getTxIntegrationInjector()).addDependency(ConnectorServices.CONNECTOR_CONFIG_SERVICE, JcaSubsystemConfiguration.class, activator.getConfigInjector()).addDependency(ConnectorServices.CCM_SERVICE, CachedConnectionManager.class, activator.getCcmInjector());
    sb.requires(NamingService.SERVICE_NAME);
    sb.requires(capabilityServiceSupport.getCapabilityServiceName(MessagingServices.LOCAL_TRANSACTION_PROVIDER_CAPABILITY));
    sb.requires(ConnectorServices.BOOTSTRAP_CONTEXT_SERVICE.append("default"));
    // PASSIVE: starts only once its required dependencies are up.
    sb.setInitialMode(ServiceController.Mode.PASSIVE).install();
    serviceTarget.addService(ConnectorServices.RESOURCE_ADAPTER_DEPLOYER_SERVICE_PREFIX.append(name), Service.NULL).install();
}
244314.524134wildfly
/**
 * Assembles the resource-adapter configuration for this pooled connection factory
 * (bound to a local ActiveMQ broker) and installs its activation services.
 *
 * Resolves connector configurations from the broker (falling back to an arbitrary
 * broker connector when none is configured and {@code pickAnyConnectors} is set),
 * adds discovery-group properties when a discovery group name is given, routes the
 * adapter params to the general/inbound/outbound property sets, then installs a
 * PASSIVE {@code ResourceAdapterActivatorService} plus a placeholder deployer
 * service, both depending on the broker's activation service.
 *
 * @param serviceTarget target used to install the new services
 * @param container     service container published to {@code WildFlyRecoveryRegistry}
 * @throws Exception if a connector cannot be resolved or service creation fails
 */
private void createService(ServiceTarget serviceTarget, ServiceContainer container) throws Exception {
    // NOTE(review): the original declared two InputStream locals (is/isIj) that were
    // never assigned and "closed" them in a finally block wrapping the whole method.
    // That was provably dead code (both always null) and has been removed.
    List<ConfigProperty> properties = new ArrayList<ConfigProperty>();
    // Comma-separated, index-aligned lists: one factory class name and one param
    // group per connector; params within a connector are joined with ';'.
    StringBuilder connectorClassname = new StringBuilder();
    StringBuilder connectorParams = new StringBuilder();
    if (discoveryGroupName == null && connectors.isEmpty() && pickAnyConnectors) {
        // No explicit transport configured: borrow the first connector the broker defines.
        Set<String> connectorNames = ActiveMQServer.class.cast(activeMQBroker.getValue().getDelegate()).getConfiguration().getConnectorConfigurations().keySet();
        if (!connectorNames.isEmpty()) {
            String connectorName = connectorNames.iterator().next();
            MessagingLogger.ROOT_LOGGER.connectorForPooledConnectionFactory(name, connectorName);
            connectors.add(connectorName);
        }
    }
    for (String connector : connectors) {
        TransportConfiguration tc = ActiveMQServer.class.cast(activeMQBroker.getValue().getDelegate()).getConfiguration().getConnectorConfigurations().get(connector);
        if (tc == null) {
            throw MessagingLogger.ROOT_LOGGER.connectorNotDefined(connector);
        }
        if (connectorClassname.length() > 0) {
            connectorClassname.append(",");
            connectorParams.append(",");
        }
        connectorClassname.append(tc.getFactoryClassName());
        Map<String, Object> params = tc.getParams();
        boolean multiple = false;
        for (Map.Entry<String, Object> entry : params.entrySet()) {
            if (multiple) {
                connectorParams.append(";");
            }
            connectorParams.append(entry.getKey()).append("=").append(entry.getValue());
            multiple = true;
        }
    }
    if (connectorClassname.length() > 0) {
        properties.add(simpleProperty15(CONNECTOR_CLASSNAME, STRING_TYPE, connectorClassname.toString()));
    }
    if (connectorParams.length() > 0) {
        properties.add(simpleProperty15(CONNECTION_PARAMETERS, STRING_TYPE, connectorParams.toString()));
    }
    if (discoveryGroupName != null) {
        // NOTE(review): this lookup returns null for an unknown group name, which would
        // NPE on getBroadcastEndpointFactory() below — confirm upstream validation
        // guarantees the name exists.
        DiscoveryGroupConfiguration discoveryGroupConfiguration = ActiveMQServer.class.cast(activeMQBroker.getValue().getDelegate()).getConfiguration().getDiscoveryGroupConfigurations().get(discoveryGroupName);
        BroadcastEndpointFactory bgCfg = discoveryGroupConfiguration.getBroadcastEndpointFactory();
        if (bgCfg instanceof UDPBroadcastEndpointFactory) {
            UDPBroadcastEndpointFactory udpCfg = (UDPBroadcastEndpointFactory) bgCfg;
            properties.add(simpleProperty15(GROUP_ADDRESS, STRING_TYPE, udpCfg.getGroupAddress()));
            properties.add(simpleProperty15(GROUP_PORT, INTEGER_TYPE, "" + udpCfg.getGroupPort()));
            properties.add(simpleProperty15(DISCOVERY_LOCAL_BIND_ADDRESS, STRING_TYPE, "" + udpCfg.getLocalBindAddress()));
        } else if (bgCfg instanceof CommandDispatcherBroadcastEndpointFactory) {
            properties.add(simpleProperty15(JGROUPS_CHANNEL_NAME, STRING_TYPE, jgroupsChannelName));
            properties.add(simpleProperty15(JGROUPS_CHANNEL_REF_NAME, STRING_TYPE, serverName + "/discovery" + discoveryGroupConfiguration.getName()));
        }
        properties.add(simpleProperty15(DISCOVERY_INITIAL_WAIT_TIMEOUT, LONG_TYPE, "" + discoveryGroupConfiguration.getDiscoveryInitialWaitTimeout()));
        properties.add(simpleProperty15(REFRESH_TIMEOUT, LONG_TYPE, "" + discoveryGroupConfiguration.getRefreshTimeout()));
    }
    // Split adapter params into general / inbound / outbound sets. The
    // rebalance-connections flag is special-cased: it only goes inbound, and only
    // when enabled.
    boolean hasReconnect = false;
    final List<ConfigProperty> inboundProperties = new ArrayList<>();
    final List<ConfigProperty> outboundProperties = new ArrayList<>();
    final String reconnectName = ConnectionFactoryAttributes.Pooled.RECONNECT_ATTEMPTS_PROP_NAME;
    for (PooledConnectionFactoryConfigProperties adapterParam : adapterParams) {
        hasReconnect |= reconnectName.equals(adapterParam.getName());
        ConfigProperty p = simpleProperty15(adapterParam.getName(), adapterParam.getType(), adapterParam.getValue());
        if (adapterParam.getName().equals(REBALANCE_CONNECTIONS_PROP_NAME)) {
            boolean rebalanceConnections = Boolean.parseBoolean(adapterParam.getValue());
            if (rebalanceConnections) {
                inboundProperties.add(p);
            }
        } else {
            if (null == adapterParam.getConfigType()) {
                properties.add(p);
            } else {
                switch(adapterParam.getConfigType()) {
                    case INBOUND:
                        inboundProperties.add(p);
                        break;
                    case OUTBOUND:
                        outboundProperties.add(p);
                        break;
                    default:
                        properties.add(p);
                        break;
                }
            }
        }
    }
    // Ensure a reconnect-attempts value is always present.
    if (!hasReconnect) {
        properties.add(simpleProperty15(reconnectName, Integer.class.getName(), DEFAULT_MAX_RECONNECTS));
    }
    configureCredential(properties);
    // Inbound side needs the JMS destination prefixes for lookup.
    inboundProperties.add(simpleProperty15("queuePrefix", String.class.getName(), JMS_QUEUE_PREFIX));
    inboundProperties.add(simpleProperty15("topicPrefix", String.class.getName(), JMS_TOPIC_PREFIX));
    WildFlyRecoveryRegistry.container = container;
    // Build the JCA 1.5 metadata graph and the activator service around it.
    OutboundResourceAdapter outbound = createOutbound(outboundProperties);
    InboundResourceAdapter inbound = createInbound(inboundProperties);
    ResourceAdapter ra = createResourceAdapter15(properties, outbound, inbound);
    Connector cmd = createConnector15(ra);
    TransactionSupportEnum transactionSupport = getTransactionSupport(txSupport);
    ConnectionDefinition common = createConnDef(transactionSupport, bindInfo.getBindName(), minPoolSize, maxPoolSize, managedConnectionPoolClassName, enlistmentTrace);
    Activation activation = createActivation(common, transactionSupport);
    ResourceAdapterActivatorService activator = new ResourceAdapterActivatorService(cmd, activation, PooledConnectionFactoryService.class.getClassLoader(), name);
    activator.setBindInfo(bindInfo);
    activator.setCreateBinderService(createBinderService);
    activator.addJndiAliases(jndiAliases);
    final ServiceBuilder sb = Services.addServerExecutorDependency(serviceTarget.addService(getResourceAdapterActivatorsServiceName(name), activator), activator.getExecutorServiceInjector()).addDependency(ConnectorServices.IRONJACAMAR_MDR, AS7MetadataRepository.class, activator.getMdrInjector()).addDependency(ConnectorServices.RA_REPOSITORY_SERVICE, ResourceAdapterRepository.class, activator.getRaRepositoryInjector()).addDependency(ConnectorServices.MANAGEMENT_REPOSITORY_SERVICE, ManagementRepository.class, activator.getManagementRepositoryInjector()).addDependency(ConnectorServices.RESOURCE_ADAPTER_REGISTRY_SERVICE, ResourceAdapterDeploymentRegistry.class, activator.getRegistryInjector()).addDependency(ConnectorServices.TRANSACTION_INTEGRATION_SERVICE, TransactionIntegration.class, activator.getTxIntegrationInjector()).addDependency(ConnectorServices.CONNECTOR_CONFIG_SERVICE, JcaSubsystemConfiguration.class, activator.getConfigInjector()).addDependency(ConnectorServices.CCM_SERVICE, CachedConnectionManager.class, activator.getCcmInjector());
    sb.requires(ActiveMQActivationService.getServiceName(getActiveMQServiceName(serverName)));
    sb.requires(NamingService.SERVICE_NAME);
    sb.requires(MessagingServices.getCapabilityServiceName(MessagingServices.LOCAL_TRANSACTION_PROVIDER_CAPABILITY));
    sb.requires(ConnectorServices.BOOTSTRAP_CONTEXT_SERVICE.append("default"));
    // PASSIVE: starts only once its required dependencies are up.
    sb.setInitialMode(ServiceController.Mode.PASSIVE).install();
    serviceTarget.addService(ConnectorServices.RESOURCE_ADAPTER_DEPLOYER_SERVICE_PREFIX.append(name), Service.NULL).install();
}
2414992.261135wildfly
/**
 * Verifies that the default values declared on the subsystem's attribute definitions stay in
 * sync with the defaults hard-coded in the underlying messaging client/broker libraries
 * ({@code ActiveMQDefaultConfiguration}, {@code ActiveMQClient}, and {@code HornetQClient} for
 * the legacy connection factory). Each assertion passes the attribute name as the JUnit
 * message so a failure identifies exactly which attribute drifted.
 */
public void testAttributeValues() {
    // NOTE(review): the next three deliberately use assertNotEquals — presumably the subsystem
    // chooses defaults that intentionally diverge from the ActiveMQ library defaults for these
    // attributes. Confirm this divergence is still intentional before "fixing" them to assertEquals.
    Assert.assertNotEquals(ServerDefinition.GLOBAL_MAX_DISK_USAGE.getName(), ServerDefinition.GLOBAL_MAX_DISK_USAGE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultMaxDiskUsage());
    Assert.assertNotEquals(ServerDefinition.GLOBAL_MAX_MEMORY_SIZE.getName(), ServerDefinition.GLOBAL_MAX_MEMORY_SIZE.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultMaxGlobalSize());
    Assert.assertNotEquals(ServerDefinition.JOURNAL_POOL_FILES.getName(), ServerDefinition.JOURNAL_POOL_FILES.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultJournalPoolFiles());
    // --- Bridge attributes vs. ActiveMQ broker defaults ---
    Assert.assertEquals(BridgeDefinition.INITIAL_CONNECT_ATTEMPTS.getName(), BridgeDefinition.INITIAL_CONNECT_ATTEMPTS.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultBridgeInitialConnectAttempts());
    Assert.assertEquals(BridgeDefinition.RECONNECT_ATTEMPTS.getName(), BridgeDefinition.RECONNECT_ATTEMPTS.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultBridgeReconnectAttempts());
    Assert.assertEquals(BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE.getName(), BridgeDefinition.RECONNECT_ATTEMPTS_ON_SAME_NODE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultBridgeConnectSameNode());
    Assert.assertEquals(BridgeDefinition.USE_DUPLICATE_DETECTION.getName(), BridgeDefinition.USE_DUPLICATE_DETECTION.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultBridgeDuplicateDetection());
    // --- Cluster connection attributes vs. ActiveMQ broker defaults ---
    Assert.assertEquals(ClusterConnectionDefinition.CHECK_PERIOD.getName(), ClusterConnectionDefinition.CHECK_PERIOD.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultClusterFailureCheckPeriod());
    Assert.assertEquals(ClusterConnectionDefinition.CONNECTION_TTL.getName(), ClusterConnectionDefinition.CONNECTION_TTL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultClusterConnectionTtl());
    Assert.assertEquals(ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS.getName(), ClusterConnectionDefinition.INITIAL_CONNECT_ATTEMPTS.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultClusterInitialConnectAttempts());
    Assert.assertEquals(ClusterConnectionDefinition.MAX_HOPS.getName(), ClusterConnectionDefinition.MAX_HOPS.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultClusterMaxHops());
    Assert.assertEquals(ClusterConnectionDefinition.MAX_RETRY_INTERVAL.getName(), ClusterConnectionDefinition.MAX_RETRY_INTERVAL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultClusterMaxRetryInterval());
    Assert.assertEquals(ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE.getName(), ClusterConnectionDefinition.MESSAGE_LOAD_BALANCING_TYPE.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultClusterMessageLoadBalancingType());
    Assert.assertEquals(ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS.getName(), ClusterConnectionDefinition.NOTIFICATION_ATTEMPTS.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultClusterNotificationAttempts());
    Assert.assertEquals(ClusterConnectionDefinition.NOTIFICATION_INTERVAL.getName(), ClusterConnectionDefinition.NOTIFICATION_INTERVAL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultClusterNotificationInterval());
    Assert.assertEquals(ClusterConnectionDefinition.RECONNECT_ATTEMPTS.getName(), ClusterConnectionDefinition.RECONNECT_ATTEMPTS.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultClusterReconnectAttempts());
    Assert.assertEquals(ClusterConnectionDefinition.RETRY_INTERVAL.getName(), ClusterConnectionDefinition.RETRY_INTERVAL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultClusterRetryInterval());
    // double comparison: delta 0 requires an exact match with the library default
    Assert.assertEquals(ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER.getName(), ClusterConnectionDefinition.RETRY_INTERVAL_MULTIPLIER.getDefaultValue().asDouble(), ActiveMQDefaultConfiguration.getDefaultClusterRetryIntervalMultiplier(), 0);
    // --- Shared attributes vs. ActiveMQ client defaults ---
    Assert.assertEquals(CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE.getName(), CommonAttributes.BRIDGE_CONFIRMATION_WINDOW_SIZE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultBridgeConfirmationWindowSize());
    Assert.assertEquals(CommonAttributes.CALL_TIMEOUT.getName(), CommonAttributes.CALL_TIMEOUT.getDefaultValue().asLong(), ActiveMQClient.DEFAULT_CALL_TIMEOUT);
    Assert.assertEquals(CommonAttributes.CHECK_PERIOD.getName(), CommonAttributes.CHECK_PERIOD.getDefaultValue().asLong(), ActiveMQClient.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD);
    Assert.assertEquals(CommonAttributes.CONNECTION_TTL.getName(), CommonAttributes.CONNECTION_TTL.getDefaultValue().asLong(), ActiveMQClient.DEFAULT_CONNECTION_TTL);
    Assert.assertEquals(CommonAttributes.HA.getName(), CommonAttributes.HA.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_HA);
    Assert.assertEquals(CommonAttributes.MAX_RETRY_INTERVAL.getName(), CommonAttributes.MAX_RETRY_INTERVAL.getDefaultValue().asLong(), ActiveMQClient.DEFAULT_MAX_RETRY_INTERVAL);
    Assert.assertEquals(CommonAttributes.MIN_LARGE_MESSAGE_SIZE.getName(), CommonAttributes.MIN_LARGE_MESSAGE_SIZE.getDefaultValue().asLong(), ActiveMQClient.DEFAULT_MIN_LARGE_MESSAGE_SIZE);
    Assert.assertEquals(CommonAttributes.RETRY_INTERVAL.getName(), CommonAttributes.RETRY_INTERVAL.getDefaultValue().asLong(), ActiveMQClient.DEFAULT_RETRY_INTERVAL);
    Assert.assertEquals(CommonAttributes.RETRY_INTERVAL_MULTIPLIER.getName(), CommonAttributes.RETRY_INTERVAL_MULTIPLIER.getDefaultValue().asDouble(), ActiveMQClient.DEFAULT_RETRY_INTERVAL_MULTIPLIER, 0);
    // --- Pooled/regular connection factory attributes vs. ActiveMQ client defaults ---
    Assert.assertEquals(ConnectionFactoryAttributes.Common.AUTO_GROUP.getName(), ConnectionFactoryAttributes.Common.AUTO_GROUP.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_AUTO_GROUP);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE.getName(), ConnectionFactoryAttributes.Common.BLOCK_ON_ACKNOWLEDGE.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_BLOCK_ON_ACKNOWLEDGE);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND.getName(), ConnectionFactoryAttributes.Common.BLOCK_ON_DURABLE_SEND.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_BLOCK_ON_DURABLE_SEND);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND.getName(), ConnectionFactoryAttributes.Common.BLOCK_ON_NON_DURABLE_SEND.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_BLOCK_ON_NON_DURABLE_SEND);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT.getName(), ConnectionFactoryAttributes.Common.CACHE_LARGE_MESSAGE_CLIENT.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_CACHE_LARGE_MESSAGE_CLIENT);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES.getName(), ConnectionFactoryAttributes.Common.COMPRESS_LARGE_MESSAGES.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_COMPRESS_LARGE_MESSAGES);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE.getName(), ConnectionFactoryAttributes.Common.CONFIRMATION_WINDOW_SIZE.getDefaultValue().asInt(), ActiveMQClient.DEFAULT_CONFIRMATION_WINDOW_SIZE);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME.getName(), ConnectionFactoryAttributes.Common.CONNECTION_LOAD_BALANCING_CLASS_NAME.getDefaultValue().asString(), ActiveMQClient.DEFAULT_CONNECTION_LOAD_BALANCING_POLICY_CLASS_NAME);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE.getName(), ConnectionFactoryAttributes.Common.CONSUMER_MAX_RATE.getDefaultValue().asInt(), ActiveMQClient.DEFAULT_CONSUMER_MAX_RATE);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE.getName(), ConnectionFactoryAttributes.Common.CONSUMER_WINDOW_SIZE.getDefaultValue().asInt(), ActiveMQClient.DEFAULT_CONSUMER_WINDOW_SIZE);
    // DUPS_OK_BATCH_SIZE maps onto the client's generic ack batch size constant
    Assert.assertEquals(ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE.getName(), ConnectionFactoryAttributes.Common.DUPS_OK_BATCH_SIZE.getDefaultValue().asInt(), ActiveMQClient.DEFAULT_ACK_BATCH_SIZE);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION.getName(), ConnectionFactoryAttributes.Common.FAILOVER_ON_INITIAL_CONNECTION.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_FAILOVER_ON_INITIAL_CONNECTION);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE.getName(), ConnectionFactoryAttributes.Common.PRE_ACKNOWLEDGE.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_PRE_ACKNOWLEDGE);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE.getName(), ConnectionFactoryAttributes.Common.PRODUCER_MAX_RATE.getDefaultValue().asInt(), ActiveMQClient.DEFAULT_PRODUCER_MAX_RATE);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE.getName(), ConnectionFactoryAttributes.Common.PRODUCER_WINDOW_SIZE.getDefaultValue().asInt(), ActiveMQClient.DEFAULT_PRODUCER_WINDOW_SIZE);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS.getName(), ConnectionFactoryAttributes.Common.RECONNECT_ATTEMPTS.getDefaultValue().asInt(), ActiveMQClient.DEFAULT_RECONNECT_ATTEMPTS);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE.getName(), ConnectionFactoryAttributes.Common.SCHEDULED_THREAD_POOL_MAX_SIZE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultScheduledThreadPoolMaxSize());
    Assert.assertEquals(ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS.getName(), ConnectionFactoryAttributes.Common.USE_GLOBAL_POOLS.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_USE_GLOBAL_POOLS);
    Assert.assertEquals(ConnectionFactoryAttributes.Common.USE_TOPOLOGY.getName(), ConnectionFactoryAttributes.Common.USE_TOPOLOGY.getDefaultValue().asBoolean(), ActiveMQClient.DEFAULT_USE_TOPOLOGY_FOR_LOADBALANCING);
    // --- Divert and grouping-handler attributes vs. ActiveMQ broker defaults ---
    Assert.assertEquals(DivertDefinition.EXCLUSIVE.getName(), DivertDefinition.EXCLUSIVE.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultDivertExclusive());
    Assert.assertEquals(GroupingHandlerDefinition.GROUP_TIMEOUT.getName(), GroupingHandlerDefinition.GROUP_TIMEOUT.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultGroupingHandlerGroupTimeout());
    Assert.assertEquals(GroupingHandlerDefinition.REAPER_PERIOD.getName(), GroupingHandlerDefinition.REAPER_PERIOD.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultGroupingHandlerReaperPeriod());
    Assert.assertEquals(GroupingHandlerDefinition.TIMEOUT.getName(), GroupingHandlerDefinition.TIMEOUT.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultGroupingHandlerTimeout());
    // --- HA policy attributes vs. ActiveMQ broker defaults ---
    Assert.assertEquals(HAAttributes.ALLOW_FAILBACK.getName(), HAAttributes.ALLOW_FAILBACK.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultAllowAutoFailback());
    Assert.assertEquals(HAAttributes.BACKUP_PORT_OFFSET.getName(), HAAttributes.BACKUP_PORT_OFFSET.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultHapolicyBackupPortOffset());
    Assert.assertEquals(HAAttributes.BACKUP_REQUEST_RETRIES.getName(), HAAttributes.BACKUP_REQUEST_RETRIES.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultHapolicyBackupRequestRetries());
    Assert.assertEquals(HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL.getName(), HAAttributes.BACKUP_REQUEST_RETRY_INTERVAL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultHapolicyBackupRequestRetryInterval());
    Assert.assertEquals(HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN.getName(), HAAttributes.FAILOVER_ON_SERVER_SHUTDOWN.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultFailoverOnServerShutdown());
    Assert.assertEquals(HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT.getName(), HAAttributes.INITIAL_REPLICATION_SYNC_TIMEOUT.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultInitialReplicationSyncTimeout());
    Assert.assertEquals(HAAttributes.MAX_BACKUPS.getName(), HAAttributes.MAX_BACKUPS.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultHapolicyMaxBackups());
    Assert.assertEquals(HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE.getName(), HAAttributes.MAX_SAVED_REPLICATED_JOURNAL_SIZE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultMaxSavedReplicatedJournalsSize());
    Assert.assertEquals(HAAttributes.REQUEST_BACKUP.getName(), HAAttributes.REQUEST_BACKUP.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultHapolicyRequestBackup());
    Assert.assertEquals(HAAttributes.RESTART_BACKUP.getName(), HAAttributes.RESTART_BACKUP.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultRestartBackup());
    // --- JGroups broadcast/discovery group attributes ---
    Assert.assertEquals(JGroupsBroadcastGroupDefinition.BROADCAST_PERIOD.getName(), JGroupsBroadcastGroupDefinition.BROADCAST_PERIOD.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultBroadcastPeriod());
    Assert.assertEquals(JGroupsDiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT.getName(), JGroupsDiscoveryGroupDefinition.INITIAL_WAIT_TIMEOUT.getDefaultValue().asLong(), ActiveMQClient.DEFAULT_DISCOVERY_INITIAL_WAIT_TIMEOUT);
    Assert.assertEquals(JGroupsDiscoveryGroupDefinition.REFRESH_TIMEOUT.getName(), JGroupsDiscoveryGroupDefinition.REFRESH_TIMEOUT.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultBroadcastRefreshTimeout());
    // --- Legacy connection factory attributes vs. HornetQ client defaults (not ActiveMQ) ---
    Assert.assertEquals(LegacyConnectionFactoryDefinition.AUTO_GROUP.getName(), LegacyConnectionFactoryDefinition.AUTO_GROUP.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_AUTO_GROUP);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE.getName(), LegacyConnectionFactoryDefinition.BLOCK_ON_ACKNOWLEDGE.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_BLOCK_ON_ACKNOWLEDGE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND.getName(), LegacyConnectionFactoryDefinition.BLOCK_ON_DURABLE_SEND.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_BLOCK_ON_DURABLE_SEND);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND.getName(), LegacyConnectionFactoryDefinition.BLOCK_ON_NON_DURABLE_SEND.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_BLOCK_ON_NON_DURABLE_SEND);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT.getName(), LegacyConnectionFactoryDefinition.CACHE_LARGE_MESSAGE_CLIENT.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_CACHE_LARGE_MESSAGE_CLIENT);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD.getName(), LegacyConnectionFactoryDefinition.CLIENT_FAILURE_CHECK_PERIOD.getDefaultValue().asLong(), HornetQClient.DEFAULT_CLIENT_FAILURE_CHECK_PERIOD);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES.getName(), LegacyConnectionFactoryDefinition.COMPRESS_LARGE_MESSAGES.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_COMPRESS_LARGE_MESSAGES);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE.getName(), LegacyConnectionFactoryDefinition.CONFIRMATION_WINDOW_SIZE.getDefaultValue().asInt(), HornetQClient.DEFAULT_CONFIRMATION_WINDOW_SIZE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME.getName(), LegacyConnectionFactoryDefinition.CONNECTION_LOAD_BALANCING_CLASS_NAME.getDefaultValue().asString(), HornetQClient.DEFAULT_CONNECTION_LOAD_BALANCING_POLICY_CLASS_NAME);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.CONNECTION_TTL.getName(), LegacyConnectionFactoryDefinition.CONNECTION_TTL.getDefaultValue().asLong(), HornetQClient.DEFAULT_CONNECTION_TTL);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE.getName(), LegacyConnectionFactoryDefinition.CONSUMER_MAX_RATE.getDefaultValue().asInt(), HornetQClient.DEFAULT_CONSUMER_MAX_RATE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE.getName(), LegacyConnectionFactoryDefinition.CONSUMER_WINDOW_SIZE.getDefaultValue().asInt(), HornetQClient.DEFAULT_CONSUMER_WINDOW_SIZE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE.getName(), LegacyConnectionFactoryDefinition.DUPS_OK_BATCH_SIZE.getDefaultValue().asInt(), HornetQClient.DEFAULT_ACK_BATCH_SIZE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION.getName(), LegacyConnectionFactoryDefinition.FAILOVER_ON_INITIAL_CONNECTION.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_FAILOVER_ON_INITIAL_CONNECTION);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS.getName(), LegacyConnectionFactoryDefinition.INITIAL_CONNECT_ATTEMPTS.getDefaultValue().asInt(), HornetQClient.INITIAL_CONNECT_ATTEMPTS);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE.getName(), LegacyConnectionFactoryDefinition.INITIAL_MESSAGE_PACKET_SIZE.getDefaultValue().asInt(), HornetQClient.DEFAULT_INITIAL_MESSAGE_PACKET_SIZE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL.getName(), LegacyConnectionFactoryDefinition.MAX_RETRY_INTERVAL.getDefaultValue().asLong(), HornetQClient.DEFAULT_MAX_RETRY_INTERVAL);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE.getName(), LegacyConnectionFactoryDefinition.MIN_LARGE_MESSAGE_SIZE.getDefaultValue().asInt(), HornetQClient.DEFAULT_MIN_LARGE_MESSAGE_SIZE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE.getName(), LegacyConnectionFactoryDefinition.PRE_ACKNOWLEDGE.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_PRE_ACKNOWLEDGE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE.getName(), LegacyConnectionFactoryDefinition.PRODUCER_MAX_RATE.getDefaultValue().asInt(), HornetQClient.DEFAULT_PRODUCER_MAX_RATE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE.getName(), LegacyConnectionFactoryDefinition.PRODUCER_WINDOW_SIZE.getDefaultValue().asInt(), HornetQClient.DEFAULT_PRODUCER_WINDOW_SIZE);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS.getName(), LegacyConnectionFactoryDefinition.RECONNECT_ATTEMPTS.getDefaultValue().asInt(), HornetQClient.DEFAULT_RECONNECT_ATTEMPTS);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.RETRY_INTERVAL.getName(), LegacyConnectionFactoryDefinition.RETRY_INTERVAL.getDefaultValue().asLong(), HornetQClient.DEFAULT_RETRY_INTERVAL);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER.getName(), LegacyConnectionFactoryDefinition.RETRY_INTERVAL_MULTIPLIER.getDefaultValue().asDouble(), HornetQClient.DEFAULT_RETRY_INTERVAL_MULTIPLIER, 0);
    Assert.assertEquals(LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS.getName(), LegacyConnectionFactoryDefinition.USE_GLOBAL_POOLS.getDefaultValue().asBoolean(), HornetQClient.DEFAULT_USE_GLOBAL_POOLS);
    // --- Server attributes vs. ActiveMQ broker defaults ---
    Assert.assertEquals(ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED.getName(), ServerDefinition.ASYNC_CONNECTION_EXECUTION_ENABLED.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultAsyncConnectionExecutionEnabled());
    Assert.assertEquals(ServerDefinition.CLUSTER_PASSWORD.getName(), ServerDefinition.CLUSTER_PASSWORD.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultClusterPassword());
    Assert.assertEquals(ServerDefinition.CLUSTER_USER.getName(), ServerDefinition.CLUSTER_USER.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultClusterUser());
    Assert.assertEquals(ServerDefinition.CONNECTION_TTL_OVERRIDE.getName(), ServerDefinition.CONNECTION_TTL_OVERRIDE.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultConnectionTtlOverride());
    Assert.assertEquals(ServerDefinition.CREATE_BINDINGS_DIR.getName(), ServerDefinition.CREATE_BINDINGS_DIR.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultCreateBindingsDir());
    Assert.assertEquals(ServerDefinition.CREATE_JOURNAL_DIR.getName(), ServerDefinition.CREATE_JOURNAL_DIR.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultCreateJournalDir());
    Assert.assertEquals(ServerDefinition.DISK_SCAN_PERIOD.getName(), ServerDefinition.DISK_SCAN_PERIOD.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultDiskScanPeriod());
    Assert.assertEquals(ServerDefinition.ID_CACHE_SIZE.getName(), ServerDefinition.ID_CACHE_SIZE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultIdCacheSize());
    Assert.assertEquals(ServerDefinition.JMX_DOMAIN.getName(), ServerDefinition.JMX_DOMAIN.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultJmxDomain());
    Assert.assertEquals(ServerDefinition.JOURNAL_BINDINGS_TABLE.getName(), ServerDefinition.JOURNAL_BINDINGS_TABLE.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultBindingsTableName());
    Assert.assertEquals(ServerDefinition.JOURNAL_COMPACT_MIN_FILES.getName(), ServerDefinition.JOURNAL_COMPACT_MIN_FILES.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultJournalCompactMinFiles());
    Assert.assertEquals(ServerDefinition.JOURNAL_COMPACT_PERCENTAGE.getName(), ServerDefinition.JOURNAL_COMPACT_PERCENTAGE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultJournalCompactPercentage());
    Assert.assertEquals(ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT.getName(), ServerDefinition.JOURNAL_FILE_OPEN_TIMEOUT.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultJournalFileOpenTimeout());
    Assert.assertEquals(ServerDefinition.JOURNAL_FILE_SIZE.getName(), ServerDefinition.JOURNAL_FILE_SIZE.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultJournalFileSize());
    // NOTE(review): the three JOURNAL_JDBC_* defaults are multiplied by 1000 before comparing,
    // presumably because the subsystem model stores these in seconds while the ActiveMQ
    // defaults are in milliseconds — confirm the model's declared unit matches this assumption.
    Assert.assertEquals(ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION.getName(), ServerDefinition.JOURNAL_JDBC_LOCK_EXPIRATION.getDefaultValue().asInt() * 1000, ActiveMQDefaultConfiguration.getDefaultJdbcLockExpirationMillis());
    Assert.assertEquals(ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD.getName(), ServerDefinition.JOURNAL_JDBC_LOCK_RENEW_PERIOD.getDefaultValue().asInt() * 1000, ActiveMQDefaultConfiguration.getDefaultJdbcLockRenewPeriodMillis());
    Assert.assertEquals(ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT.getName(), ServerDefinition.JOURNAL_JDBC_NETWORK_TIMEOUT.getDefaultValue().asInt() * 1000, ActiveMQDefaultConfiguration.getDefaultJdbcNetworkTimeout());
    Assert.assertEquals(ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE.getName(), ServerDefinition.JOURNAL_LARGE_MESSAGES_TABLE.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultLargeMessagesTableName());
    Assert.assertEquals(ServerDefinition.JOURNAL_MESSAGES_TABLE.getName(), ServerDefinition.JOURNAL_MESSAGES_TABLE.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultMessageTableName());
    Assert.assertEquals(ServerDefinition.JOURNAL_MIN_FILES.getName(), ServerDefinition.JOURNAL_MIN_FILES.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultJournalMinFiles());
    Assert.assertEquals(ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE.getName(), ServerDefinition.JOURNAL_NODE_MANAGER_STORE_TABLE.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultNodeManagerStoreTableName());
    Assert.assertEquals(ServerDefinition.JOURNAL_PAGE_STORE_TABLE.getName(), ServerDefinition.JOURNAL_PAGE_STORE_TABLE.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultPageStoreTableName());
    Assert.assertEquals(ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL.getName(), ServerDefinition.JOURNAL_SYNC_NON_TRANSACTIONAL.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultJournalSyncNonTransactional());
    Assert.assertEquals(ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL.getName(), ServerDefinition.JOURNAL_SYNC_TRANSACTIONAL.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultJournalSyncTransactional());
    Assert.assertEquals(ServerDefinition.LOG_JOURNAL_WRITE_RATE.getName(), ServerDefinition.LOG_JOURNAL_WRITE_RATE.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultJournalLogWriteRate());
    // management addresses are SimpleString in ActiveMQ, hence the toString() on the expected side
    Assert.assertEquals(ServerDefinition.MANAGEMENT_ADDRESS.getName(), ServerDefinition.MANAGEMENT_ADDRESS.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultManagementAddress().toString());
    Assert.assertEquals(ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS.getName(), ServerDefinition.MANAGEMENT_NOTIFICATION_ADDRESS.getDefaultValue().asString(), ActiveMQDefaultConfiguration.getDefaultManagementNotificationAddress().toString());
    Assert.assertEquals(ServerDefinition.MEMORY_MEASURE_INTERVAL.getName(), ServerDefinition.MEMORY_MEASURE_INTERVAL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultMemoryMeasureInterval());
    Assert.assertEquals(ServerDefinition.MEMORY_WARNING_THRESHOLD.getName(), ServerDefinition.MEMORY_WARNING_THRESHOLD.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultMemoryWarningThreshold());
    Assert.assertEquals(ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY.getName(), ServerDefinition.MESSAGE_COUNTER_MAX_DAY_HISTORY.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultMessageCounterMaxDayHistory());
    Assert.assertEquals(ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD.getName(), ServerDefinition.MESSAGE_COUNTER_SAMPLE_PERIOD.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultMessageCounterSamplePeriod());
    Assert.assertEquals(ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD.getName(), ServerDefinition.MESSAGE_EXPIRY_SCAN_PERIOD.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultMessageExpiryScanPeriod());
    Assert.assertEquals(ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY.getName(), ServerDefinition.MESSAGE_EXPIRY_THREAD_PRIORITY.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultMessageExpiryThreadPriority());
    Assert.assertEquals(ServerDefinition.PAGE_MAX_CONCURRENT_IO.getName(), ServerDefinition.PAGE_MAX_CONCURRENT_IO.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultMaxConcurrentPageIo());
    Assert.assertEquals(ServerDefinition.PERSISTENCE_ENABLED.getName(), ServerDefinition.PERSISTENCE_ENABLED.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultPersistenceEnabled());
    Assert.assertEquals(ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY.getName(), ServerDefinition.PERSIST_DELIVERY_COUNT_BEFORE_DELIVERY.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultPersistDeliveryCountBeforeDelivery());
    Assert.assertEquals(ServerDefinition.PERSIST_ID_CACHE.getName(), ServerDefinition.PERSIST_ID_CACHE.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultPersistIdCache());
    Assert.assertEquals(ServerDefinition.SECURITY_ENABLED.getName(), ServerDefinition.SECURITY_ENABLED.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultSecurityEnabled());
    Assert.assertEquals(ServerDefinition.SECURITY_INVALIDATION_INTERVAL.getName(), ServerDefinition.SECURITY_INVALIDATION_INTERVAL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultSecurityInvalidationInterval());
    Assert.assertEquals(ServerDefinition.SERVER_DUMP_INTERVAL.getName(), ServerDefinition.SERVER_DUMP_INTERVAL.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultServerDumpInterval());
    Assert.assertEquals(ServerDefinition.THREAD_POOL_MAX_SIZE.getName(), ServerDefinition.THREAD_POOL_MAX_SIZE.getDefaultValue().asInt(), ActiveMQDefaultConfiguration.getDefaultThreadPoolMaxSize());
    Assert.assertEquals(ServerDefinition.TRANSACTION_TIMEOUT.getName(), ServerDefinition.TRANSACTION_TIMEOUT.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultTransactionTimeout());
    Assert.assertEquals(ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD.getName(), ServerDefinition.TRANSACTION_TIMEOUT_SCAN_PERIOD.getDefaultValue().asLong(), ActiveMQDefaultConfiguration.getDefaultTransactionTimeoutScanPeriod());
    Assert.assertEquals(ServerDefinition.WILD_CARD_ROUTING_ENABLED.getName(), ServerDefinition.WILD_CARD_ROUTING_ENABLED.getDefaultValue().asBoolean(), ActiveMQDefaultConfiguration.isDefaultWildcardRoutingEnabled());
}
242936.8814185wildfly
 static void installRuntimeServices(CapabilityServiceTarget target, ExpressionResolver resolver, PathAddress address, ModelNode model) throws OperationFailedException {
    final CookieConfig sessionCookieConfig = SessionCookieDefinition.getConfig(resolver, model.get(SessionCookieDefinition.PATH_ELEMENT.getKeyValuePair()));
    final CookieConfig affinityCookieConfig = AffinityCookieDefinition.getConfig(resolver, model.get(AffinityCookieDefinition.PATH_ELEMENT.getKeyValuePair()));
    final CrawlerSessionManagerConfig crawlerSessionManagerConfig = CrawlerSessionManagementDefinition.getConfig(resolver, model.get(CrawlerSessionManagementDefinition.PATH_ELEMENT.getKeyValuePair()));
    final boolean persistentSessions = PersistentSessionsDefinition.isEnabled(model.get(PersistentSessionsDefinition.PATH_ELEMENT.getKeyValuePair()));
    final boolean allowNonStandardWrappers = ServletContainerDefinition.ALLOW_NON_STANDARD_WRAPPERS.resolveModelAttribute(resolver, model).asBoolean();
    final boolean proactiveAuth = ServletContainerDefinition.PROACTIVE_AUTHENTICATION.resolveModelAttribute(resolver, model).asBoolean();
    final String bufferCache = ServletContainerDefinition.DEFAULT_BUFFER_CACHE.resolveModelAttribute(resolver, model).asString();
    final boolean disableFileWatchService = ServletContainerDefinition.DISABLE_FILE_WATCH_SERVICE.resolveModelAttribute(resolver, model).asBoolean();
    final boolean disableSessionIdReususe = ServletContainerDefinition.DISABLE_SESSION_ID_REUSE.resolveModelAttribute(resolver, model).asBoolean();
    JSPConfig jspConfig = JspDefinition.getConfig(resolver, model.get(JspDefinition.PATH_ELEMENT.getKeyValuePair()));
    final String stackTracesString = ServletContainerDefinition.STACK_TRACE_ON_ERROR.resolveModelAttribute(resolver, model).asString();
    final ModelNode defaultEncodingValue = ServletContainerDefinition.DEFAULT_ENCODING.resolveModelAttribute(resolver, model);
    final String defaultEncoding = defaultEncodingValue.isDefined() ? defaultEncodingValue.asString() : null;
    final boolean useListenerEncoding = ServletContainerDefinition.USE_LISTENER_ENCODING.resolveModelAttribute(resolver, model).asBoolean();
    final boolean ignoreFlush = ServletContainerDefinition.IGNORE_FLUSH.resolveModelAttribute(resolver, model).asBoolean();
    final boolean eagerFilterInit = ServletContainerDefinition.EAGER_FILTER_INIT.resolveModelAttribute(resolver, model).asBoolean();
    final boolean disableCachingForSecuredPages = ServletContainerDefinition.DISABLE_CACHING_FOR_SECURED_PAGES.resolveModelAttribute(resolver, model).asBoolean();
    final int sessionIdLength = ServletContainerDefinition.SESSION_ID_LENGTH.resolveModelAttribute(resolver, model).asInt();
    final int fileCacheMetadataSize = ServletContainerDefinition.FILE_CACHE_METADATA_SIZE.resolveModelAttribute(resolver, model).asInt();
    final int fileCacheMaxFileSize = ServletContainerDefinition.FILE_CACHE_MAX_FILE_SIZE.resolveModelAttribute(resolver, model).asInt();
    final ModelNode fileCacheTtlNode = ServletContainerDefinition.FILE_CACHE_TIME_TO_LIVE.resolveModelAttribute(resolver, model);
    final Integer fileCacheTimeToLive = fileCacheTtlNode.isDefined() ? fileCacheTtlNode.asInt() : null;
    final int defaultCookieVersion = ServletContainerDefinition.DEFAULT_COOKIE_VERSION.resolveModelAttribute(resolver, model).asInt();
    final boolean preservePathOnForward = ServletContainerDefinition.PRESERVE_PATH_ON_FORWARD.resolveModelAttribute(resolver, model).asBoolean();
    boolean orphanSessionAllowed = ServletContainerDefinition.ORPHAN_SESSION_ALLOWED.resolveModelAttribute(resolver, model).asBoolean();
    Boolean directoryListingEnabled = ServletContainerDefinition.DIRECTORY_LISTING.resolveModelAttribute(resolver, model).asBooleanOrNull();
    Integer maxSessions = ServletContainerDefinition.MAX_SESSIONS.resolveModelAttribute(resolver, model).asIntOrNull();
    final int sessionTimeout = ServletContainerDefinition.DEFAULT_SESSION_TIMEOUT.resolveModelAttribute(resolver, model).asInt();
    WebsocketsDefinition.WebSocketInfo webSocketInfo = WebsocketsDefinition.getConfig(resolver, model.get(WebsocketsDefinition.PATH_ELEMENT.getKeyValuePair()));
    Map<String, String> mimeMappings = resolveMimeMappings(resolver, model);
    List<String> welcomeFiles = resolveWelcomeFiles(model);
    final CapabilityServiceBuilder<?> builder = target.addCapability(ServletContainerDefinition.SERVLET_CONTAINER_CAPABILITY);
    final Supplier<SessionPersistenceManager> sessionPersistenceManager = persistentSessions ? builder.requires(AbstractPersistentSessionManager.SERVICE_NAME) : null;
    final Supplier<DirectBufferCache> directBufferCache = bufferCache != null ? builder.requires(BufferCacheService.SERVICE_NAME.append(bufferCache)) : null;
    final Supplier<ByteBufferPool> byteBufferPool = webSocketInfo != null ? builder.requiresCapability(Capabilities.CAPABILITY_BYTE_BUFFER_POOL, ByteBufferPool.class, webSocketInfo.getBufferPool()) : null;
    final Supplier<XnioWorker> xnioWorker = webSocketInfo != null ? builder.requiresCapability(Capabilities.REF_IO_WORKER, XnioWorker.class, webSocketInfo.getWorker()) : null;
    ServletStackTraces traces = ServletStackTraces.valueOf(stackTracesString.toUpperCase(Locale.ENGLISH).replace('-', '_'));
    ServletContainer container = ServletContainer.Factory.newInstance();
    ServletContainerService service = new ServletContainerService() {

        @Override
        public ServletContainer getServletContainer() {
            return container;
        }

        @Override
        public boolean isAllowNonStandardWrappers() {
            return allowNonStandardWrappers;
        }

        @Override
        public JSPConfig getJspConfig() {
            return jspConfig;
        }

        @Override
        public ServletStackTraces getStackTraces() {
            return traces;
        }

        @Override
        public CookieConfig getSessionCookieConfig() {
            return sessionCookieConfig;
        }

        @Override
        public CookieConfig getAffinityCookieConfig() {
            return affinityCookieConfig;
        }

        @Override
        public DirectBufferCache getBufferCache() {
            return (directBufferCache != null) ? directBufferCache.get() : null;
        }

        @Override
        public boolean isDisableCachingForSecuredPages() {
            return disableCachingForSecuredPages;
        }

        @Override
        public boolean isDispatchWebsocketInvocationToWorker() {
            return (webSocketInfo != null) && webSocketInfo.isDispatchToWorker();
        }

        @Override
        public boolean isPerMessageDeflate() {
            return (webSocketInfo != null) && webSocketInfo.isPerMessageDeflate();
        }

        @Override
        public int getDeflaterLevel() {
            return (webSocketInfo != null) ? webSocketInfo.getDeflaterLevel() : -1;
        }

        @Override
        public boolean isWebsocketsEnabled() {
            return webSocketInfo != null;
        }

        @Override
        public boolean isDisableSessionIdReuse() {
            return disableSessionIdReususe;
        }

        @Override
        public SessionPersistenceManager getSessionPersistenceManager() {
            return (sessionPersistenceManager != null) ? sessionPersistenceManager.get() : null;
        }

        @Override
        public XnioWorker getWebsocketsWorker() {
            return (xnioWorker != null) ? xnioWorker.get() : null;
        }

        @Override
        public ByteBufferPool getWebsocketsBufferPool() {
            return (byteBufferPool != null) ? byteBufferPool.get() : null;
        }

        @Override
        public String getDefaultEncoding() {
            return defaultEncoding;
        }

        @Override
        public boolean isUseListenerEncoding() {
            return useListenerEncoding;
        }

        @Override
        public boolean isIgnoreFlush() {
            return ignoreFlush;
        }

        @Override
        public boolean isEagerFilterInit() {
            return eagerFilterInit;
        }

        @Override
        public int getDefaultSessionTimeout() {
            return sessionTimeout;
        }

        @Override
        public Map<String, String> getMimeMappings() {
            return mimeMappings;
        }

        @Override
        public List<String> getWelcomeFiles() {
            return welcomeFiles;
        }

        @Override
        public Boolean getDirectoryListingEnabled() {
            return directoryListingEnabled;
        }

        @Override
        public boolean isProactiveAuth() {
            return proactiveAuth;
        }

        @Override
        public int getSessionIdLength() {
            return sessionIdLength;
        }

        @Override
        public Integer getMaxSessions() {
            return maxSessions;
        }

        @Override
        public boolean isDisableFileWatchService() {
            return disableFileWatchService;
        }

        @Override
        public CrawlerSessionManagerConfig getCrawlerSessionManagerConfig() {
            return crawlerSessionManagerConfig;
        }

        @Override
        public int getFileCacheMetadataSize() {
            return fileCacheMetadataSize;
        }

        @Override
        public int getFileCacheMaxFileSize() {
            return fileCacheMaxFileSize;
        }

        @Override
        public Integer getFileCacheTimeToLive() {
            return fileCacheTimeToLive;
        }

        @Override
        public int getDefaultCookieVersion() {
            return defaultCookieVersion;
        }

        @Override
        public boolean isPreservePathOnForward() {
            return preservePathOnForward;
        }

        @Override
        public boolean isOrphanSessionAllowed() {
            return orphanSessionAllowed;
        }
    };
    builder.setInstance(Service.newInstance(builder.provides(ServletContainerDefinition.SERVLET_CONTAINER_CAPABILITY), service));
    builder.setInitialMode(ServiceController.Mode.ON_DEMAND);
    builder.install();
}
243427.0531132wildfly
/**
 * Discovers bean archives that live OUTSIDE the deployment itself — i.e. in static modules the
 * deployment (or any sub-deployment) depends on — and registers each as an additional external
 * {@code BeanDeploymentArchiveImpl} on the root deployment unit.
 *
 * Only runs for Weld (CDI) deployments, and only on the root deployment unit; sub-deployments
 * are walked from here via the {@code SUB_DEPLOYMENTS} attachment list.
 *
 * @param phaseContext the current deployment phase context
 * @throws DeploymentUnitProcessingException if a beans.xml location cannot be converted to a URL
 */
public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    // Not a CDI deployment: nothing to do.
    if (!WeldDeploymentMarker.isPartOfWeldDeployment(deploymentUnit)) {
        return;
    }
    // Process only the top-level unit; sub-deployments are reached through its attachments below.
    if (deploymentUnit.getParent() != null) {
        return;
    }
    // Class names of components that must NOT be treated as discovered external beans
    // (decided per-component by the ComponentSupport SPI below).
    final Set<String> componentClassNames = new HashSet<>();
    final ServiceLoader<ComponentSupport> supportServices = ServiceLoader.load(ComponentSupport.class, WildFlySecurityManager.getClassLoaderPrivileged(ExternalBeanArchiveProcessor.class));
    // Prefix used to build unique bean-archive ids for the external archives created here.
    final String beanArchiveIdPrefix = deploymentUnit.getName() + ".external.";
    final Set<AnnotationType> beanDefiningAnnotations = new HashSet<>(deploymentUnit.getAttachment(WeldAttachments.BEAN_DEFINING_ANNOTATIONS));
    // Root unit first, then all sub-deployments.
    List<DeploymentUnit> subDeployments = deploymentUnit.getAttachmentList(Attachments.SUB_DEPLOYMENTS);
    List<DeploymentUnit> deploymentUnits = new ArrayList<>(subDeployments.size() + 1);
    deploymentUnits.add(deploymentUnit);
    deploymentUnits.addAll(subDeployments);
    // Composite loader over this processor's loader plus every deployment module's loader,
    // used to load ModuleServicesProvider implementations from any of them.
    List<ClassLoader> loaders = new ArrayList<>(deploymentUnits.size() + 1);
    loaders.add(WildFlySecurityManager.getClassLoaderPrivileged(WeldDeploymentProcessor.class));
    for (DeploymentUnit unit : deploymentUnits) {
        loaders.add(unit.getAttachment(Attachments.MODULE).getClassLoader());
    }
    BeansXmlParser parser = new PropertyReplacingBeansXmlParser(deploymentUnit, Utils.getRootDeploymentUnit(deploymentUnit).getAttachment(WeldConfiguration.ATTACHMENT_KEY).isLegacyEmptyBeansXmlTreatment());
    // URLs of beans.xml files already handled as part of the deployment itself; any dependency
    // resource matching one of these is skipped so the same archive is not registered twice.
    final HashSet<URL> existing = new HashSet<URL>();
    // Module names of the deployment units themselves ("deployment." + name), used to skip
    // intra-deployment module dependencies when walking each module's dependency list.
    final Set<String> depUnitNames = new HashSet<>();
    final String prefix = "deployment.";
    for (DeploymentUnit deployment : deploymentUnits) {
        depUnitNames.add(prefix + deployment.getName());
        try {
            final ExplicitBeanArchiveMetadataContainer weldDeploymentMetadata = deployment.getAttachment(ExplicitBeanArchiveMetadataContainer.ATTACHMENT_KEY);
            if (weldDeploymentMetadata != null) {
                for (ExplicitBeanArchiveMetadata md : weldDeploymentMetadata.getBeanArchiveMetadata().values()) {
                    existing.add(md.getBeansXmlFile().toURL());
                    if (md.getAdditionalBeansXmlFile() != null) {
                        existing.add(md.getAdditionalBeansXmlFile().toURL());
                    }
                }
            }
        } catch (MalformedURLException e) {
            throw new DeploymentUnitProcessingException(e);
        }
        EEModuleDescription moduleDesc = deployment.getAttachment(org.jboss.as.ee.component.Attachments.EE_MODULE_DESCRIPTION);
        if (moduleDesc != null) {
            for (ComponentDescription component : moduleDesc.getComponentDescriptions()) {
                for (ComponentSupport support : supportServices) {
                    // First support service that vetoes the component wins; record and stop.
                    if (!support.isDiscoveredExternalType(component)) {
                        componentClassNames.add(component.getComponentClassName());
                        break;
                    }
                }
            }
        }
    }
    Iterable<ModuleServicesProvider> moduleServicesProviders = ServiceLoader.load(ModuleServicesProvider.class, new CompositeClassLoader(loaders));
    // Cache of beans.xml URL -> archive root URL per dependency module, so a module shared by
    // several deployment units is only scanned once.
    Map<String, Map<URL, URL>> exportedResourcesCache = new HashMap<>();
    for (DeploymentUnit deployment : deploymentUnits) {
        final Module module = deployment.getAttachment(Attachments.MODULE);
        if (module == null) {
            // NOTE(review): a missing MODULE attachment aborts processing of ALL remaining
            // deployment units (return, not continue) — confirm this is intentional.
            return;
        }
        for (DependencySpec dep : module.getDependencies()) {
            // Only module-to-module dependencies can contribute external bean archives.
            if (!(dep instanceof ModuleDependencySpec)) {
                continue;
            }
            // Skip dependencies that point back at one of the deployment's own units.
            if (depUnitNames.contains(((ModuleDependencySpec) dep).getName())) {
                continue;
            }
            final Module dependency = loadModuleDependency(dep);
            if (dependency == null) {
                continue;
            }
            Map<URL, URL> resourcesMap = findExportedResources(dependency, exportedResourcesCache);
            if (!resourcesMap.isEmpty()) {
                List<BeanDeploymentArchiveImpl> moduleBdas = new ArrayList<>();
                for (Entry<URL, URL> entry : resourcesMap.entrySet()) {
                    URL beansXmlUrl = entry.getKey();
                    if (existing.contains(beansXmlUrl)) {
                        continue;
                    }
                    // Hard-coded exclusions: these libraries ship a beans.xml that must not be
                    // treated as an external bean archive here.
                    if (beansXmlUrl.toString().contains("jsf-impl-2.2")) {
                        continue;
                    }
                    if (beansXmlUrl.toString().contains("resteasy-cdi")) {
                        continue;
                    }
                    // Respect the dependency's import filter: if META-INF is not visible to the
                    // deployment, its beans.xml must not be picked up either.
                    if (!dep.getImportFilter().accept("META-INF")) {
                        continue;
                    }
                    WeldLogger.DEPLOYMENT_LOGGER.debugf("Found external beans.xml: %s", beansXmlUrl.toString());
                    final BeansXml beansXml = parseBeansXml(beansXmlUrl, parser, deploymentUnit);
                    // bean-discovery-mode="none" explicitly opts the archive out of CDI.
                    if (BeanDiscoveryMode.NONE.equals(beansXml.getBeanDiscoveryMode())) {
                        continue;
                    }
                    Map<String, List<String>> allAndBeanClasses = discover(beansXml.getBeanDiscoveryMode(), beansXmlUrl, entry.getValue(), beanDefiningAnnotations);
                    Collection<String> discoveredBeanClasses = allAndBeanClasses.get(BEAN_CLASSES);
                    Collection<String> allKnownClasses = allAndBeanClasses.get(ALL_KNOWN_CLASSES);
                    if (discoveredBeanClasses == null) {
                        continue;
                    }
                    // Component classes vetoed earlier must not also appear as discovered beans.
                    discoveredBeanClasses.removeAll(componentClassNames);
                    final BeanDeploymentArchiveImpl bda = new BeanDeploymentArchiveImpl(new HashSet<String>(discoveredBeanClasses), new HashSet<String>(allKnownClasses), beansXml, dependency, beanArchiveIdPrefix + beansXmlUrl.toExternalForm(), BeanArchiveType.EXTERNAL);
                    WeldLogger.DEPLOYMENT_LOGGER.beanArchiveDiscovered(bda);
                    // Attach module-level services (loaded via the composite loader) to the BDA.
                    for (Entry<Class<? extends Service>, Service> moduleService : ServiceLoaders.loadModuleServices(moduleServicesProviders, deploymentUnit, deployment, module, null).entrySet()) {
                        bda.getServices().add(moduleService.getKey(), Reflections.cast(moduleService.getValue()));
                    }
                    deploymentUnit.addToAttachmentList(WeldAttachments.ADDITIONAL_BEAN_DEPLOYMENT_MODULES, bda);
                    moduleBdas.add(bda);
                    // Mark handled so another deployment unit depending on the same module
                    // does not register this archive again.
                    existing.add(beansXmlUrl);
                }
                // Make every BDA created from this module visible to every other one.
                for (BeanDeploymentArchiveImpl i : moduleBdas) {
                    for (BeanDeploymentArchiveImpl j : moduleBdas) {
                        if (i != j) {
                            i.addBeanDeploymentArchive(j);
                        }
                    }
                }
            }
        }
    }
}
252397.117274cassandra
/**
 * ASM visitor hook that rewrites selected method invocations so they are routed through the
 * simulator's interceptors, making the transformed class deterministic/simulable.
 *
 * The branches below redirect: Cassandra concurrency-utility factories, TimeUnit.sleep,
 * various nondeterministic JDK/global calls, ThreadLocalRandom/Striped64 probe handling,
 * IdentityHashMap/ConcurrentHashMap construction, LockSupport park/unpark, and clock reads.
 * Anything not matched is passed through unchanged. Each rewrite records a witness on the
 * transformer so the transformation kind is observable.
 */
public void visitMethodInsn(int opcode, String owner, String name, String descriptor, boolean isInterface) {
    // Remember whether this is the very first method instruction of the current method body;
    // used below to leave InterceptibleConcurrentHashMap's own super() call untouched.
    boolean isFirstMethodInsn = !hasSeenAnyMethodInsn;
    hasSeenAnyMethodInsn = true;
    if (globalMethods && opcode == Opcodes.INVOKESTATIC && owner.startsWith("org/apache/cassandra/utils/") && ((owner.equals("org/apache/cassandra/utils/concurrent/WaitQueue") && name.equals("newWaitQueue")) || (owner.equals("org/apache/cassandra/utils/concurrent/CountDownLatch") && name.equals("newCountDownLatch")) || (owner.equals("org/apache/cassandra/utils/concurrent/Condition") && name.equals("newOneTimeCondition")) || (owner.equals("org/apache/cassandra/utils/concurrent/BlockingQueues") && name.equals("newBlockingQueue")) || (owner.equals("org/apache/cassandra/utils/concurrent/Semaphore") && (name.equals("newSemaphore") || name.equals("newFairSemaphore"))))) {
        // Cassandra concurrency-primitive factory methods -> simulator-controlled factories
        // (same name and descriptor, different owner).
        transformer.witness(GLOBAL_METHOD);
        super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfGlobalMethods$Global", name, descriptor, false);
    } else if (globalMethods && ((opcode == Opcodes.INVOKEVIRTUAL && owner.equals("java/util/concurrent/TimeUnit") && name.equals("sleep")))) {
        // TimeUnit.sleep(long): the virtual call becomes a static intercepted call; the TimeUnit
        // receiver stays on the stack as the first argument, hence the (Ljava/lang/Object;J)V
        // descriptor instead of the original one.
        transformer.witness(GLOBAL_METHOD);
        super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, "(Ljava/lang/Object;J)V", false);
    } else if (globalMethods && ((opcode == Opcodes.INVOKESTATIC && (owner.startsWith("org/apache/cassandra/utils/") && ((owner.equals("org/apache/cassandra/utils/Clock") && name.equals("waitUntil")) || (owner.equals("org/apache/cassandra/utils/concurrent/Awaitable$SyncAwaitable") && name.equals("waitUntil"))) || !deterministic && owner.equals("java/lang/System") && name.equals("identityHashCode") || owner.equals("java/util/UUID") && name.equals("randomUUID") || owner.equals("com/google/common/util/concurrent/Uninterruptibles") && name.equals("sleepUninterruptibly") || owner.equals("sun/misc/Unsafe") && name.equals("getUnsafe"))))) {
        // Nondeterministic static calls (waitUntil, identityHashCode when not already
        // deterministic, randomUUID, sleepUninterruptibly, getUnsafe) -> intercepted versions.
        transformer.witness(GLOBAL_METHOD);
        super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
    } else if ((globalMethods || deterministic) && opcode == Opcodes.INVOKESTATIC && ((owner.equals("java/util/concurrent/ThreadLocalRandom") && (name.equals("getProbe") || name.equals("advanceProbe") || name.equals("localInit"))) || (owner.equals("java/util/concurrent/atomic/Striped64") && (name.equals("getProbe") || name.equals("advanceProbe"))))) {
        transformer.witness(GLOBAL_METHOD);
        // getProbe(): the no-arg call is replaced with the constant 0 (stack-neutral swap);
        // advanceProbe: redirected to the interceptor; localInit: no instruction is emitted at
        // all, i.e. the call is elided (it is a no-arg void method, so this is stack-safe).
        if (name.equals("getProbe"))
            super.visitLdcInsn(0);
        else if (name.equals("advanceProbe"))
            super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
    } else if (globalMethods && opcode == Opcodes.INVOKESPECIAL && owner.equals("java/util/IdentityHashMap") && name.equals("<init>")) {
        // new IdentityHashMap(...) -> new InterceptedIdentityHashMap(...) so identity hashing
        // is under simulator control.
        transformer.witness(IDENTITY_HASH_MAP);
        super.visitMethodInsn(opcode, "org/apache/cassandra/simulator/systems/InterceptedIdentityHashMap", name, descriptor, false);
    } else if (globalMethods && opcode == Opcodes.INVOKESPECIAL && owner.equals("java/util/concurrent/ConcurrentHashMap") && name.equals("<init>") && !(transformer.className().equals("org/apache/cassandra/simulator/systems/InterceptibleConcurrentHashMap") && methodName.equals("<init>") && isFirstMethodInsn)) {
        // new ConcurrentHashMap(...) -> new InterceptibleConcurrentHashMap(...), EXCEPT the
        // super() call inside InterceptibleConcurrentHashMap's own constructor (detected as the
        // first method instruction of that <init>), which must still target the real CHM.
        transformer.witness(CONCURRENT_HASH_MAP);
        super.visitMethodInsn(opcode, "org/apache/cassandra/simulator/systems/InterceptibleConcurrentHashMap", name, descriptor, false);
    } else if (lockSupport && opcode == Opcodes.INVOKESTATIC && owner.equals("java/util/concurrent/locks/LockSupport") && (name.startsWith("park") || name.equals("unpark"))) {
        // LockSupport.park*/unpark -> intercepted equivalents.
        transformer.witness(TransformationKind.LOCK_SUPPORT);
        super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
    } else if (globalClock && opcode == Opcodes.INVOKESTATIC && name.equals("timestampMicros") && owner.equals("org/apache/cassandra/utils/FBUtilities")) {
        // FBUtilities.timestampMicros() -> simulated monotonic clock (note: target method NAME
        // changes to nextGlobalMonotonicMicros; descriptor is unchanged).
        transformer.witness(GLOBAL_METHOD);
        super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/SimulatedTime$Global", "nextGlobalMonotonicMicros", descriptor, false);
    } else if (systemClock && opcode == Opcodes.INVOKESTATIC && owner.equals("java/lang/System") && (name.equals("nanoTime") || name.equals("currentTimeMillis"))) {
        // System.nanoTime()/currentTimeMillis() -> intercepted clock reads.
        transformer.witness(GLOBAL_METHOD);
        super.visitMethodInsn(Opcodes.INVOKESTATIC, "org/apache/cassandra/simulator/systems/InterceptorOfSystemMethods$Global", name, descriptor, false);
    } else {
        // No rewrite applies: emit the instruction unchanged.
        super.visitMethodInsn(opcode, owner, name, descriptor, isInterface);
    }
}
254199.391173cassandra
/**
 * Exercises map element and slice selection (m[key], m[a..b], m[a..], m[..b]) on regular,
 * frozen, static, and frozen-static map columns, including selection aliases, UDF-computed
 * slice bounds, and bind markers as keys/bounds.
 *
 * @throws Throwable on any CQL execution failure
 */
public void testMapOperation() throws Throwable {
    // Table with every map flavour: regular (m), frozen (fm), static (sm), frozen static (fsm).
    createTable("CREATE TABLE %s (k int, c int, l text, " + "m map<text, text>, " + "fm frozen<map<text, text>>, " + "sm map<text, text> STATIC, " + "fsm frozen<map<text, text>> STATIC, " + "o int, PRIMARY KEY (k, c))");
    // Seed two partitions (k=0 and k=2), then flush so reads span sstables + memtable.
    execute("INSERT INTO %s(k, c, l, m, fm, sm, fsm, o) VALUES (0, 0, 'foobar', ?, ?, ?, ?, 42)", map("22", "value22", "333", "value333"), map("1", "fvalue1", "22", "fvalue22", "333", "fvalue333"), map("22", "svalue22", "333", "svalue333"), map("1", "fsvalue1", "22", "fsvalue22", "333", "fsvalue333"));
    execute("INSERT INTO %s(k, c, l, m, fm, sm, fsm, o) VALUES (2, 0, 'row2', ?, ?, ?, ?, 88)", map("22", "2value22", "333", "2value333"), map("1", "2fvalue1", "22", "2fvalue22", "333", "2fvalue333"), map("22", "2svalue22", "333", "2svalue333"), map("1", "2fsvalue1", "22", "2fsvalue22", "333", "2fsvalue333"));
    flush();
    // Append entries to the non-frozen maps after the flush, so selections must merge state.
    execute("UPDATE %s SET m = m + ? WHERE k = 0 AND c = 0", map("1", "value1"));
    execute("UPDATE %s SET sm = sm + ? WHERE k = 0", map("1", "svalue1"));
    flush();
    // Single-element and multi-element selection on one row.
    assertRows(execute("SELECT m['22'] FROM %s WHERE k = 0 AND c = 0"), row("value22"));
    assertRows(execute("SELECT m['1'], m['22'], m['333'] FROM %s WHERE k = 0 AND c = 0"), row("value1", "value22", "value333"));
    // Slice selection: keys are compared as text, so '22' falls in ['2','3'].
    assertRows(execute("SELECT m['2'..'3'] FROM %s WHERE k = 0 AND c = 0"), row(map("22", "value22")));
    // Second clustering row in partition 0 (no static columns written here).
    execute("INSERT INTO %s(k, c, l, m, fm, o) VALUES (0, 1, 'foobar', ?, ?, 42)", map("1", "value1_2", "333", "value333_2"), map("1", "fvalue1_2", "333", "fvalue333_2"));
    // Regular/frozen values differ per row; static values repeat across rows of the partition.
    assertRows(execute("SELECT c, m['1'], fm['1'] FROM %s WHERE k = 0"), row(0, "value1", "fvalue1"), row(1, "value1_2", "fvalue1_2"));
    assertRows(execute("SELECT c, sm['1'], fsm['1'] FROM %s WHERE k = 0"), row(0, "svalue1", "fsvalue1"), row(1, "svalue1", "fsvalue1"));
    assertRows(execute("SELECT c, m['1'], fm['1'] FROM %s WHERE k = 0 AND c = 0"), row(0, "value1", "fvalue1"));
    assertRows(execute("SELECT c, m['1'], fm['1'] FROM %s WHERE k = 0"), row(0, "value1", "fvalue1"), row(1, "value1_2", "fvalue1_2"));
    // Result-set column naming: alias vs. the generated m['1'] / slice names.
    assertColumnNames(execute("SELECT k, l, m['1'] as mx, o FROM %s WHERE k = 0"), "k", "l", "mx", "o");
    assertColumnNames(execute("SELECT k, l, m['1'], o FROM %s WHERE k = 0"), "k", "l", "m['1']", "o");
    // Missing keys select as null rather than dropping the row.
    assertRows(execute("SELECT k, l, m['22'], o FROM %s WHERE k = 0"), row(0, "foobar", "value22", 42), row(0, "foobar", null, 42));
    assertColumnNames(execute("SELECT k, l, m['22'], o FROM %s WHERE k = 0"), "k", "l", "m['22']", "o");
    assertRows(execute("SELECT k, l, m['333'], o FROM %s WHERE k = 0"), row(0, "foobar", "value333", 42), row(0, "foobar", "value333_2", 42));
    assertRows(execute("SELECT k, l, m['foobar'], o FROM %s WHERE k = 0"), row(0, "foobar", null, 42), row(0, "foobar", null, 42));
    // Closed, half-open, and open-ended slice bounds.
    assertRows(execute("SELECT k, l, m['1'..'22'], o FROM %s WHERE k = 0"), row(0, "foobar", map("1", "value1", "22", "value22"), 42), row(0, "foobar", map("1", "value1_2"), 42));
    assertRows(execute("SELECT k, l, m[''..'23'], o FROM %s WHERE k = 0"), row(0, "foobar", map("1", "value1", "22", "value22"), 42), row(0, "foobar", map("1", "value1_2"), 42));
    assertColumnNames(execute("SELECT k, l, m[''..'23'], o FROM %s WHERE k = 0"), "k", "l", "m[''..'23']", "o");
    assertRows(execute("SELECT k, l, m['2'..'3'], o FROM %s WHERE k = 0"), row(0, "foobar", map("22", "value22"), 42), row(0, "foobar", null, 42));
    assertRows(execute("SELECT k, l, m['22'..], o FROM %s WHERE k = 0"), row(0, "foobar", map("22", "value22", "333", "value333"), 42), row(0, "foobar", map("333", "value333_2"), 42));
    assertRows(execute("SELECT k, l, m[..'22'], o FROM %s WHERE k = 0"), row(0, "foobar", map("1", "value1", "22", "value22"), 42), row(0, "foobar", map("1", "value1_2"), 42));
    // Whole-map selection, once and twice in the same statement.
    assertRows(execute("SELECT k, l, m, o FROM %s WHERE k = 0"), row(0, "foobar", map("1", "value1", "22", "value22", "333", "value333"), 42), row(0, "foobar", map("1", "value1_2", "333", "value333_2"), 42));
    assertRows(execute("SELECT k, l, m, m as m2, o FROM %s WHERE k = 0"), row(0, "foobar", map("1", "value1", "22", "value22", "333", "value333"), map("1", "value1", "22", "value22", "333", "value333"), 42), row(0, "foobar", map("1", "value1_2", "333", "value333_2"), 42));
    // UDF-computed slice bounds, with literal and bound arguments.
    String f = createFunction(KEYSPACE, "text", "CREATE FUNCTION %s(arg text) " + "CALLED ON NULL INPUT " + "RETURNS TEXT " + "LANGUAGE java AS 'return arg;'");
    assertRows(execute("SELECT k, c, l, m[" + f + "('1').." + f + "('22')], o FROM %s WHERE k = 0"), row(0, 0, "foobar", map("1", "value1", "22", "value22"), 42), row(0, 1, "foobar", map("1", "value1_2"), 42));
    assertRows(execute("SELECT k, c, l, m[" + f + "(?).." + f + "(?)], o FROM %s WHERE k = 0", "1", "22"), row(0, 0, "foobar", map("1", "value1", "22", "value22"), 42), row(0, 1, "foobar", map("1", "value1_2"), 42));
    // Map element selections used as UDF arguments, alone and mixed with other columns.
    f = createFunction(KEYSPACE, "map<text,text>", "CREATE FUNCTION %s(m text) " + "CALLED ON NULL INPUT " + "RETURNS TEXT " + "LANGUAGE java AS $$return m;$$");
    assertRows(execute("SELECT k, c, " + f + "(m['1']) FROM %s WHERE k = 0"), row(0, 0, "value1"), row(0, 1, "value1_2"));
    f = createFunction(KEYSPACE, "map<text,text>,map<text,text>,int,int", "CREATE FUNCTION %s(m1 map<text,text>, m2 text, k int, c int) " + "CALLED ON NULL INPUT " + "RETURNS TEXT " + "LANGUAGE java AS $$return m1.get(\"1\") + ':' + m2 + ':' + k + ':' + c;$$");
    assertRows(execute("SELECT " + f + "(m, m['1'], k, c) FROM %s WHERE k = 0"), row("value1:value1:0:0"), row("value1_2:value1_2:0:1"));
    // Aggregates over a UDF: per-partition (k = 0) and full-table scans.
    f = createFunction(KEYSPACE, "int,int", "CREATE FUNCTION %s(k int, c int) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java AS $$return k + c;$$");
    assertColumnNames(execute("SELECT max(" + f + "(k, c)) as sel1, max(" + f + "(k, c)) FROM %s WHERE k = 0"), "sel1", "system.max(" + f + "(k, c))");
    assertRows(execute("SELECT max(" + f + "(k, c)) as sel1, max(" + f + "(k, c)) FROM %s WHERE k = 0"), row(1, 1));
    assertColumnNames(execute("SELECT max(" + f + "(k, c)) as sel1, max(" + f + "(k, c)) FROM %s"), "sel1", "system.max(" + f + "(k, c))");
    assertRows(execute("SELECT max(" + f + "(k, c)) as sel1, max(" + f + "(k, c)) FROM %s"), row(2, 2));
    // Bind markers as element keys and slice bounds.
    assertRows(execute("SELECT c, m[?], fm[?] FROM %s WHERE k = 0", "1", "1"), row(0, "value1", "fvalue1"), row(1, "value1_2", "fvalue1_2"));
    assertRows(execute("SELECT c, sm[?], fsm[?] FROM %s WHERE k = 0", "1", "1"), row(0, "svalue1", "fsvalue1"), row(1, "svalue1", "fsvalue1"));
    assertRows(execute("SELECT k, l, m[?..?], o FROM %s WHERE k = 0", "1", "22"), row(0, "foobar", map("1", "value1", "22", "value22"), 42), row(0, "foobar", map("1", "value1_2"), 42));
}
253722.413169cassandra
/**
 * Exercises conditional (IF) comparisons against a whole UDT value — equality, inequality,
 * ordering, and IN — for both frozen and non-frozen UDT columns, across four stored values:
 * fully-populated, null second field, null first field, and a null UDT.
 *
 * checkAppliesUDT / checkDoesNotApplyUDT assert whether the condition applies to the current
 * stored value; checkInvalidUDT asserts the condition is rejected with the given exception.
 *
 * @throws Throwable on any CQL execution failure
 */
public void testWholeUDT() throws Throwable {
    String typename = createType("CREATE TYPE %s (a int, b text)");
    String myType = KEYSPACE + '.' + typename;
    // Run the whole suite twice: once with a non-frozen column, once with frozen<type>.
    for (boolean frozen : new boolean[] { false, true }) {
        createTable(String.format("CREATE TABLE %%s (k int PRIMARY KEY, v %s)", frozen ? "frozen<" + myType + ">" : myType));
        // Scenario 1: fully populated value {a: 0, b: 'abc'}.
        Object v = userType("a", 0, "b", "abc");
        execute("INSERT INTO %s (k, v) VALUES (0, ?)", v);
        checkAppliesUDT("v = {a: 0, b: 'abc'}", v);
        checkAppliesUDT("v != null", v);
        checkAppliesUDT("v != {a: 1, b: 'abc'}", v);
        checkAppliesUDT("v != {a: 0, b: 'def'}", v);
        checkAppliesUDT("v > {a: -1, b: 'abc'}", v);
        checkAppliesUDT("v > {a: 0, b: 'aaa'}", v);
        checkAppliesUDT("v > {a: 0}", v);
        checkAppliesUDT("v >= {a: 0, b: 'aaa'}", v);
        checkAppliesUDT("v >= {a: 0, b: 'abc'}", v);
        checkAppliesUDT("v < {a: 0, b: 'zzz'}", v);
        checkAppliesUDT("v < {a: 1, b: 'abc'}", v);
        checkAppliesUDT("v < {a: 1}", v);
        checkAppliesUDT("v <= {a: 0, b: 'zzz'}", v);
        checkAppliesUDT("v <= {a: 0, b: 'abc'}", v);
        checkAppliesUDT("v IN (null, {a: 0, b: 'abc'}, {a: 1})", v);
        checkAppliesUDT("v > {a: -1, b: 'abc'} AND v > {a: 0}", v);
        checkAppliesUDT("v != null AND v IN ({a: 0, b: 'abc'})", v);
        checkDoesNotApplyUDT("v = {a: 0, b: 'def'}", v);
        checkDoesNotApplyUDT("v = {a: 1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v = null", v);
        checkDoesNotApplyUDT("v != {a: 0, b: 'abc'}", v);
        checkDoesNotApplyUDT("v > {a: 1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v > {a: 0, b: 'zzz'}", v);
        checkDoesNotApplyUDT("v >= {a: 1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v >= {a: 0, b: 'zzz'}", v);
        checkDoesNotApplyUDT("v < {a: -1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v < {a: 0, b: 'aaa'}", v);
        checkDoesNotApplyUDT("v <= {a: -1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v <= {a: 0, b: 'aaa'}", v);
        checkDoesNotApplyUDT("v IN ({a: 0}, {b: 'abc'}, {a: 0, b: 'def'}, null)", v);
        checkDoesNotApplyUDT("v IN ()", v);
        checkDoesNotApplyUDT("v IN () AND v IN ({a: 0, b: 'abc'})", v);
        checkDoesNotApplyUDT("v > {a: 0, b: 'aaa'} AND v < {a: 0, b: 'aaa'}", v);
        // Invalid conditions: unknown fields, null in ordering comparisons, malformed IN,
        // and CONTAINS on a non-collection.
        checkInvalidUDT("v = {a: 1, b: 'abc', c: 'foo'}", v, InvalidRequestException.class);
        checkInvalidUDT("v = {foo: 'foo'}", v, InvalidRequestException.class);
        checkInvalidUDT("v < {a: 1, b: 'abc', c: 'foo'}", v, InvalidRequestException.class);
        checkInvalidUDT("v < null", v, InvalidRequestException.class);
        checkInvalidUDT("v <= {a: 1, b: 'abc', c: 'foo'}", v, InvalidRequestException.class);
        checkInvalidUDT("v <= null", v, InvalidRequestException.class);
        checkInvalidUDT("v > {a: 1, b: 'abc', c: 'foo'}", v, InvalidRequestException.class);
        checkInvalidUDT("v > null", v, InvalidRequestException.class);
        checkInvalidUDT("v >= {a: 1, b: 'abc', c: 'foo'}", v, InvalidRequestException.class);
        checkInvalidUDT("v >= null", v, InvalidRequestException.class);
        checkInvalidUDT("v IN null", v, SyntaxException.class);
        checkInvalidUDT("v IN 367", v, SyntaxException.class);
        checkInvalidUDT("v CONTAINS KEY 123", v, InvalidRequestException.class);
        checkInvalidUDT("v CONTAINS 'bar'", v, InvalidRequestException.class);
        // Scenario 2: second field null ({a: 0, b: null}); {a: 0} and {a: 0, b: null}
        // are treated as the same value.
        v = userType("a", 0, "b", null);
        execute("INSERT INTO %s (k, v) VALUES (0, ?)", v);
        checkAppliesUDT("v = {a: 0}", v);
        checkAppliesUDT("v = {a: 0, b: null}", v);
        checkAppliesUDT("v != null", v);
        checkAppliesUDT("v != {a: 1, b: null}", v);
        checkAppliesUDT("v != {a: 1}", v);
        checkAppliesUDT("v != {a: 0, b: 'def'}", v);
        checkAppliesUDT("v > {a: -1, b: 'abc'}", v);
        checkAppliesUDT("v > {a: -1}", v);
        checkAppliesUDT("v >= {a: 0}", v);
        checkAppliesUDT("v >= {a: -1, b: 'abc'}", v);
        checkAppliesUDT("v < {a: 0, b: 'zzz'}", v);
        checkAppliesUDT("v < {a: 1, b: 'abc'}", v);
        checkAppliesUDT("v < {a: 1}", v);
        checkAppliesUDT("v <= {a: 0, b: 'zzz'}", v);
        checkAppliesUDT("v <= {a: 0}", v);
        checkAppliesUDT("v IN (null, {a: 0, b: 'abc'}, {a: 0})", v);
        checkAppliesUDT("v > {a: -1, b: 'abc'} AND v >= {a: 0}", v);
        checkAppliesUDT("v != null AND v IN ({a: 0}, {a: 0, b: null})", v);
        checkDoesNotApplyUDT("v = {a: 0, b: 'def'}", v);
        checkDoesNotApplyUDT("v = {a: 1}", v);
        checkDoesNotApplyUDT("v = {b: 'abc'}", v);
        checkDoesNotApplyUDT("v = null", v);
        checkDoesNotApplyUDT("v != {a: 0}", v);
        checkDoesNotApplyUDT("v != {a: 0, b: null}", v);
        checkDoesNotApplyUDT("v > {a: 1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v > {a: 0}", v);
        checkDoesNotApplyUDT("v >= {a: 1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v >= {a: 1}", v);
        checkDoesNotApplyUDT("v < {a: -1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v < {a: -1}", v);
        checkDoesNotApplyUDT("v < {a: 0}", v);
        checkDoesNotApplyUDT("v <= {a: -1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v <= {a: -1}", v);
        checkDoesNotApplyUDT("v IN ({a: 1}, {b: 'abc'}, {a: 0, b: 'def'}, null)", v);
        checkDoesNotApplyUDT("v IN ()", v);
        checkDoesNotApplyUDT("v IN () AND v IN ({a: 0})", v);
        checkDoesNotApplyUDT("v > {a: -1} AND v < {a: 0}", v);
        // Scenario 3: first field null ({a: null, b: 'abc'}); a null leading field sorts
        // before any non-null value in the ordering comparisons below.
        v = userType("a", null, "b", "abc");
        execute("INSERT INTO %s (k, v) VALUES (0, ?)", v);
        checkAppliesUDT("v = {a: null, b: 'abc'}", v);
        checkAppliesUDT("v = {b: 'abc'}", v);
        checkAppliesUDT("v != null", v);
        checkAppliesUDT("v != {a: 0, b: 'abc'}", v);
        checkAppliesUDT("v != {a: 0}", v);
        checkAppliesUDT("v != {b: 'def'}", v);
        checkAppliesUDT("v > {a: null, b: 'aaa'}", v);
        checkAppliesUDT("v > {b: 'aaa'}", v);
        checkAppliesUDT("v >= {a: null, b: 'aaa'}", v);
        checkAppliesUDT("v >= {b: 'abc'}", v);
        checkAppliesUDT("v < {a: null, b: 'zzz'}", v);
        checkAppliesUDT("v < {a: 0, b: 'abc'}", v);
        checkAppliesUDT("v < {a: 0}", v);
        checkAppliesUDT("v < {b: 'zzz'}", v);
        checkAppliesUDT("v <= {a: null, b: 'zzz'}", v);
        checkAppliesUDT("v <= {a: 0}", v);
        checkAppliesUDT("v <= {b: 'abc'}", v);
        checkAppliesUDT("v IN (null, {a: null, b: 'abc'}, {a: 0})", v);
        checkAppliesUDT("v IN (null, {a: 0, b: 'abc'}, {b: 'abc'})", v);
        checkAppliesUDT("v > {b: 'aaa'} AND v >= {b: 'abc'}", v);
        checkAppliesUDT("v != null AND v IN ({a: 0}, {a: null, b: 'abc'})", v);
        checkDoesNotApplyUDT("v = {a: 0, b: 'def'}", v);
        checkDoesNotApplyUDT("v = {a: 1}", v);
        checkDoesNotApplyUDT("v = {b: 'def'}", v);
        checkDoesNotApplyUDT("v = null", v);
        checkDoesNotApplyUDT("v != {b: 'abc'}", v);
        checkDoesNotApplyUDT("v != {a: null, b: 'abc'}", v);
        checkDoesNotApplyUDT("v > {a: 1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v > {a: null, b: 'zzz'}", v);
        checkDoesNotApplyUDT("v > {b: 'zzz'}", v);
        checkDoesNotApplyUDT("v >= {a: null, b: 'zzz'}", v);
        checkDoesNotApplyUDT("v >= {a: 1}", v);
        checkDoesNotApplyUDT("v >= {b: 'zzz'}", v);
        checkDoesNotApplyUDT("v < {a: null, b: 'aaa'}", v);
        checkDoesNotApplyUDT("v < {b: 'aaa'}", v);
        checkDoesNotApplyUDT("v <= {a: null, b: 'aaa'}", v);
        checkDoesNotApplyUDT("v <= {b: 'aaa'}", v);
        checkDoesNotApplyUDT("v IN ({a: 1}, {a: 1, b: 'abc'}, {a: null, b: 'def'}, null)", v);
        checkDoesNotApplyUDT("v IN ()", v);
        checkDoesNotApplyUDT("v IN () AND v IN ({b: 'abc'})", v);
        checkDoesNotApplyUDT("v IN () AND v IN ({a: null, b: 'abc'})", v);
        checkDoesNotApplyUDT("v > {a: -1} AND v < {a: 0}", v);
        // Scenario 4: the whole UDT is null; only "= null" and IN lists containing null apply,
        // and every ordering comparison against it fails to apply.
        v = null;
        execute("INSERT INTO %s (k, v) VALUES (0, ?)", v);
        checkAppliesUDT("v = null", v);
        checkAppliesUDT("v IN (null, {a: null, b: 'abc'}, {a: 0})", v);
        checkAppliesUDT("v IN (null, {a: 0, b: 'abc'}, {b: 'abc'})", v);
        checkAppliesUDT("v = null AND v IN (null, {a: 0}, {a: null, b: 'abc'})", v);
        checkDoesNotApplyUDT("v = {a: 0, b: 'def'}", v);
        checkDoesNotApplyUDT("v = {a: 1}", v);
        checkDoesNotApplyUDT("v = {b: 'def'}", v);
        checkDoesNotApplyUDT("v != null", v);
        checkDoesNotApplyUDT("v > {a: 1, b: 'abc'}", v);
        checkDoesNotApplyUDT("v > {a: null, b: 'zzz'}", v);
        checkDoesNotApplyUDT("v > {b: 'zzz'}", v);
        checkDoesNotApplyUDT("v >= {a: null, b: 'zzz'}", v);
        checkDoesNotApplyUDT("v >= {a: 1}", v);
        checkDoesNotApplyUDT("v >= {b: 'zzz'}", v);
        checkDoesNotApplyUDT("v < {a: null, b: 'aaa'}", v);
        checkDoesNotApplyUDT("v < {b: 'aaa'}", v);
        checkDoesNotApplyUDT("v <= {a: null, b: 'aaa'}", v);
        checkDoesNotApplyUDT("v <= {b: 'aaa'}", v);
        checkDoesNotApplyUDT("v IN ({a: 1}, {a: 1, b: 'abc'}, {a: null, b: 'def'})", v);
        checkDoesNotApplyUDT("v IN ()", v);
        checkDoesNotApplyUDT("v IN () AND v IN ({b: 'abc'})", v);
        checkDoesNotApplyUDT("v > {a: -1} AND v < {a: 0}", v);
    }
}
253606.4214147cassandra
/**
 * Verifies that ALLOW FILTERING on clustering columns combined with LIMIT behaves
 * correctly in the presence of static columns, both on a freshly populated table and
 * after the rows have been cached (row cache is enabled via the table's caching options).
 * Covers single clustering column and two clustering columns, with and without paging,
 * GROUP BY, IN restrictions and ORDER BY.
 */
public void testFilteringOnClusteringColumnsWithLimitAndStaticColumns() throws Throwable {
    // One clustering column (b) plus a static column (s); full row caching enabled
    // so the second round of queries below exercises the cache path.
    createTable("CREATE TABLE %s (a int, b int, s int static, c int, primary key (a, b))" + " WITH caching = {'keys': 'ALL', 'rows_per_partition' : 'ALL'}");
    for (int i = 0; i < 4; i++) {
        execute("INSERT INTO %s (a, s) VALUES (?, ?)", i, i);
        // Skip b == 1 for partitions 0 and 3 so that "b = 1" matches only partitions 1 and 2.
        for (int j = 0; j < 3; j++) if (!((i == 0 || i == 3) && j == 1))
            execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", i, j, i + j);
    }
    // Run the assertions both before and after a flush (memtable vs. sstable reads).
    beforeAndAfterFlush(() -> {
        assertRows(execute("SELECT * FROM %s"), row(0, 0, 0, 0), row(0, 2, 0, 2), row(1, 0, 1, 1), row(1, 1, 1, 2), row(1, 2, 1, 3), row(2, 0, 2, 2), row(2, 1, 2, 3), row(2, 2, 2, 4), row(3, 0, 3, 3), row(3, 2, 3, 5));
        assertRows(execute("SELECT * FROM %s WHERE b = 1 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
        assertRows(execute("SELECT * FROM %s WHERE b = 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
        assertRows(execute("SELECT * FROM %s WHERE b >= 1 AND b <= 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
        // Same queries through the native protocol with small page sizes, to exercise paging.
        for (int pageSize = 1; pageSize < 4; pageSize++) {
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b = 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 2), row(2, 1, 2, 3));
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b >= 1 AND b <= 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 2), row(2, 1, 2, 3));
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b = 1 GROUP BY a LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 2), row(2, 1, 2, 3));
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b >= 1 AND b <= 1 GROUP BY a LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 2), row(2, 1, 2, 3));
        }
    });
    // IN restrictions on the partition key combined with clustering filtering and LIMIT.
    assertRows(execute("SELECT * FROM %s WHERE a IN (0, 1, 2, 3) AND b = 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (0, 1, 2, 3) AND b >= 1 AND b <= 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (0, 1, 2, 3) AND b = 1 ORDER BY b DESC LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (0, 1, 2, 3) AND b >= 1 AND b <= 1 ORDER BY b DESC LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    // Warm the row cache, then repeat the filtered queries against cached data.
    execute("SELECT * FROM %s WHERE a IN (0, 1, 2, 3)");
    assertRows(execute("SELECT * FROM %s WHERE b = 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE b >= 1 AND b <= 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (0, 1, 2) AND b = 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (0, 1, 2) AND b >= 1 AND b <= 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 2), row(2, 1, 2, 3));
    for (int pageSize = 1; pageSize < 4; pageSize++) {
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b = 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 2), row(2, 1, 2, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b >= 1 AND b <= 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 2), row(2, 1, 2, 3));
    }
    // Second scenario: two clustering columns (b, c) plus a static column.
    createTable("CREATE TABLE %s (a int, b int, c int, s int static, d int, primary key (a, b, c))" + " WITH caching = {'keys': 'ALL', 'rows_per_partition' : 'ALL'}");
    for (int i = 0; i < 3; i++) {
        execute("INSERT INTO %s (a, s) VALUES (?, ?)", i, i);
        // Skip (b, c) == (1, 1) for partition 0 so "b = 1" matches only partitions 1 and 2.
        for (int j = 0; j < 3; j++) if (!(i == 0 && j == 1))
            execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", i, j, j, i + j);
    }
    beforeAndAfterFlush(() -> {
        assertRows(execute("SELECT * FROM %s"), row(0, 0, 0, 0, 0), row(0, 2, 2, 0, 2), row(1, 0, 0, 1, 1), row(1, 1, 1, 1, 2), row(1, 2, 2, 1, 3), row(2, 0, 0, 2, 2), row(2, 1, 1, 2, 3), row(2, 2, 2, 2, 4));
        assertRows(execute("SELECT * FROM %s WHERE b = 1 ALLOW FILTERING"), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
        assertRows(execute("SELECT * FROM %s WHERE b IN (1, 2, 3, 4) AND c >= 1 AND c <= 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
        for (int pageSize = 1; pageSize < 4; pageSize++) {
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b = 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b IN (1, 2, 3, 4) AND c >= 1 AND c <= 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b = 1 GROUP BY a, b LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b IN (1, 2, 3, 4) AND c >= 1 AND c <= 1 GROUP BY a, b LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
        }
    });
    // Warm the row cache for the second table and repeat against cached data.
    execute("SELECT * FROM %s WHERE a IN (0, 1, 2)");
    assertRows(execute("SELECT * FROM %s WHERE b = 1 ALLOW FILTERING"), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE b IN (1, 2, 3, 4) AND c >= 1 AND c <= 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 3, 4) AND b = 1 ALLOW FILTERING"), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE a IN (1, 2, 3, 4) AND b IN (1, 2, 3, 4) AND c >= 1 AND c <= 1 LIMIT 2 ALLOW FILTERING"), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
    for (int pageSize = 1; pageSize < 4; pageSize++) {
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b = 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE b IN (1, 2, 3, 4) AND c >= 1 AND c <= 1 LIMIT 2 ALLOW FILTERING", pageSize), row(1, 1, 1, 1, 2), row(2, 1, 1, 2, 3));
    }
}
253132.836124elasticsearch
/**
 * Reads one node entry from the {@code nodes} section of a sniff ({@code _nodes/http})
 * response and converts it into a {@link Node}.
 * <p>
 * The parser must be positioned at the node object's START_OBJECT; on return it is
 * positioned at the matching END_OBJECT.
 *
 * @param nodeId the id under which the node appeared in the response (used for logging)
 * @param parser JSON parser positioned inside the node object
 * @param scheme scheme used when building host URIs from the reported addresses
 * @return the parsed node, or {@code null} if the node exposes no http publish address
 *         (http disabled on that node)
 * @throws IOException on JSON parse failures or a malformed node entry (repeated
 *         attribute key, missing version)
 */
private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
    HttpHost publishedHost = null;
    Set<HttpHost> boundHosts = new HashSet<>();
    String name = null;
    String version = null;
    // Attributes exactly as flattened in the response; list-valued attributes
    // arrive as "key.0", "key.1", ... and are re-assembled into lists below.
    final Map<String, String> protoAttributes = new HashMap<String, String>();
    boolean sawRoles = false;
    final Set<String> roles = new TreeSet<>();
    String fieldName = null;
    while (parser.nextToken() != JsonToken.END_OBJECT) {
        if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
            fieldName = parser.getCurrentName();
        } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
            if ("http".equals(fieldName)) {
                while (parser.nextToken() != JsonToken.END_OBJECT) {
                    if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
                        String address = parser.getValueAsString();
                        String host;
                        URI publishAddressAsURI;
                        if (address.contains("/")) {
                            // "cname/ip:port" form: use the cname as host, the rest for port/scheme.
                            String[] cnameAndURI = address.split("/", 2);
                            publishAddressAsURI = URI.create(scheme + "://" + cnameAndURI[1]);
                            host = cnameAndURI[0];
                        } else {
                            publishAddressAsURI = URI.create(scheme + "://" + address);
                            host = publishAddressAsURI.getHost();
                        }
                        publishedHost = new HttpHost(host, publishAddressAsURI.getPort(), publishAddressAsURI.getScheme());
                    } else if (parser.getCurrentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) {
                        while (parser.nextToken() != JsonToken.END_ARRAY) {
                            URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
                            boundHosts.add(new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), boundAddressAsURI.getScheme()));
                        }
                    } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
                        // Ignore nested objects we do not care about inside "http".
                        parser.skipChildren();
                    }
                }
            } else if ("attributes".equals(fieldName)) {
                while (parser.nextToken() != JsonToken.END_OBJECT) {
                    if (parser.getCurrentToken() == JsonToken.VALUE_STRING) {
                        String oldValue = protoAttributes.put(parser.getCurrentName(), parser.getValueAsString());
                        if (oldValue != null) {
                            throw new IOException("repeated attribute key [" + parser.getCurrentName() + "]");
                        }
                    } else {
                        parser.skipChildren();
                    }
                }
            } else {
                parser.skipChildren();
            }
        } else if (parser.getCurrentToken() == JsonToken.START_ARRAY) {
            if ("roles".equals(fieldName)) {
                sawRoles = true;
                while (parser.nextToken() != JsonToken.END_ARRAY) {
                    roles.add(parser.getText());
                }
            } else {
                parser.skipChildren();
            }
        } else if (parser.getCurrentToken().isScalarValue()) {
            if ("version".equals(fieldName)) {
                version = parser.getText();
            } else if ("name".equals(fieldName)) {
                name = parser.getText();
            }
        }
    }
    if (publishedHost == null) {
        // http is disabled on this node, so it cannot be used by the client.
        logger.debug("skipping node [" + nodeId + "] with http disabled");
        return null;
    }
    if (version == null) {
        // Fix: the original dereferenced version unconditionally below, turning a
        // malformed response (no "version" field) into an opaque NullPointerException.
        throw new IOException("missing version for node [" + nodeId + "]");
    }
    // Re-assemble list-valued attributes: any key ending in ".0" starts a sequence
    // "key.0", "key.1", ... which is collected into an unmodifiable list.
    Map<String, List<String>> realAttributes = new HashMap<>(protoAttributes.size());
    List<String> keys = new ArrayList<>(protoAttributes.keySet());
    for (String key : keys) {
        if (key.endsWith(".0")) {
            String realKey = key.substring(0, key.length() - 2);
            List<String> values = new ArrayList<>();
            int i = 0;
            while (true) {
                String value = protoAttributes.remove(realKey + "." + i);
                if (value == null) {
                    break;
                }
                values.add(value);
                i++;
            }
            realAttributes.put(realKey, unmodifiableList(values));
        }
    }
    // Remaining single-valued attributes become singleton lists.
    for (Map.Entry<String, String> entry : protoAttributes.entrySet()) {
        realAttributes.put(entry.getKey(), singletonList(entry.getValue()));
    }
    if (version.startsWith("2.")) {
        // 2.x responses have no "roles" array; derive master/data roles from the
        // legacy "client"/"master"/"data" node attributes instead.
        boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false);
        Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null);
        Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null);
        // Fix: Boolean.TRUE.equals avoids the NPE the original unboxing caused when
        // the attribute was absent (null) while "client" was true — the left operand
        // was false, so the bare Boolean on the right was unboxed from null.
        if ((masterAttribute == null && false == clientAttribute) || Boolean.TRUE.equals(masterAttribute)) {
            roles.add("master");
        }
        if ((dataAttribute == null && false == clientAttribute) || Boolean.TRUE.equals(dataAttribute)) {
            roles.add("data");
        }
    } else {
        assert sawRoles : "didn't see roles for [" + nodeId + "]";
    }
    assert boundHosts.contains(publishedHost) : "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
    logger.trace("adding node [" + nodeId + "]");
    return new Node(publishedHost, boundHosts, name, version, new Roles(roles), unmodifiableMap(realAttributes));
}
252109.4637126elasticsearch
/**
 * Applies a date-math expression (e.g. {@code "+1d"}, {@code "-2h"}, {@code "/M"})
 * to the given instant and returns the resulting instant.
 * <p>
 * The expression is a sequence of operations, each of the form
 * {@code ('+'|'-'|'/')[digits]unit}. {@code '/'} rounds to the start of the unit
 * (or to the last millisecond of the unit when {@code roundUpProperty} is true)
 * and must not carry a multi-digit count; {@code '+'}/{@code '-'} shift by
 * {@code digits} units (defaulting to 1 when no digits are given).
 *
 * @param mathString      the date-math expression to apply
 * @param time            the starting instant
 * @param roundUpProperty when rounding, round up to the end of the unit
 *                        (minus one millisecond) instead of down to its start
 * @param timeZone        time zone used for calendar arithmetic; UTC when null
 * @return the adjusted instant
 * @throws ElasticsearchParseException on an unknown operator/unit, a truncated
 *         expression, or a rounding operation with a count other than 1
 */
private static Instant parseMath(final String mathString, final Instant time, final boolean roundUpProperty, ZoneId timeZone) throws ElasticsearchParseException {
    if (timeZone == null) {
        timeZone = ZoneOffset.UTC;
    }
    ZonedDateTime dateTime = ZonedDateTime.ofInstant(time, timeZone);
    // i is advanced manually inside the loop body: operator, optional digits, unit.
    for (int i = 0; i < mathString.length(); ) {
        char c = mathString.charAt(i++);
        final boolean round;
        final int sign;
        if (c == '/') {
            round = true;
            sign = 1;
        } else {
            round = false;
            if (c == '+') {
                sign = 1;
            } else if (c == '-') {
                sign = -1;
            } else {
                throw new ElasticsearchParseException("operator not supported for date math [{}]", mathString);
            }
        }
        if (i >= mathString.length()) {
            throw new ElasticsearchParseException("truncated date math [{}]", mathString);
        }
        // Parse the optional count; a missing count means 1 (e.g. "+d" == "+1d").
        final int num;
        if (Character.isDigit(mathString.charAt(i)) == false) {
            num = 1;
        } else {
            int numFrom = i;
            while (i < mathString.length() && Character.isDigit(mathString.charAt(i))) {
                i++;
            }
            if (i >= mathString.length()) {
                throw new ElasticsearchParseException("truncated date math [{}]", mathString);
            }
            num = Integer.parseInt(mathString.substring(numFrom, i));
        }
        if (round) {
            // Rounding applies to a single unit only; "/2d" is rejected.
            if (num != 1) {
                throw new ElasticsearchParseException("rounding `/` can only be used on single unit types [{}]", mathString);
            }
        }
        char unit = mathString.charAt(i++);
        switch(unit) {
            case 'y':
                if (round) {
                    dateTime = dateTime.withDayOfYear(1).with(LocalTime.MIN);
                    if (roundUpProperty) {
                        dateTime = dateTime.plusYears(1);
                    }
                } else {
                    dateTime = dateTime.plusYears(sign * num);
                }
                break;
            case 'M':
                if (round) {
                    dateTime = dateTime.withDayOfMonth(1).with(LocalTime.MIN);
                    if (roundUpProperty) {
                        dateTime = dateTime.plusMonths(1);
                    }
                } else {
                    dateTime = dateTime.plusMonths(sign * num);
                }
                break;
            case 'w':
                // Weeks start on Monday for rounding purposes.
                if (round) {
                    dateTime = dateTime.with(TemporalAdjusters.previousOrSame(DayOfWeek.MONDAY)).with(LocalTime.MIN);
                    if (roundUpProperty) {
                        dateTime = dateTime.plusWeeks(1);
                    }
                } else {
                    dateTime = dateTime.plusWeeks(sign * num);
                }
                break;
            case 'd':
                if (round) {
                    dateTime = dateTime.with(LocalTime.MIN);
                    if (roundUpProperty) {
                        dateTime = dateTime.plusDays(1);
                    }
                } else {
                    dateTime = dateTime.plusDays(sign * num);
                }
                break;
            case 'h':
            case 'H':
                if (round) {
                    dateTime = dateTime.withMinute(0).withSecond(0).withNano(0);
                    if (roundUpProperty) {
                        dateTime = dateTime.plusHours(1);
                    }
                } else {
                    dateTime = dateTime.plusHours(sign * num);
                }
                break;
            case 'm':
                if (round) {
                    dateTime = dateTime.withSecond(0).withNano(0);
                    if (roundUpProperty) {
                        dateTime = dateTime.plusMinutes(1);
                    }
                } else {
                    dateTime = dateTime.plusMinutes(sign * num);
                }
                break;
            case 's':
                if (round) {
                    dateTime = dateTime.withNano(0);
                    if (roundUpProperty) {
                        dateTime = dateTime.plusSeconds(1);
                    }
                } else {
                    dateTime = dateTime.plusSeconds(sign * num);
                }
                break;
            default:
                throw new ElasticsearchParseException("unit [{}] not supported for date math [{}]", unit, mathString);
        }
        // Rounding up means "last millisecond of the unit": step back 1ms from the
        // start of the next unit computed above.
        if (round && roundUpProperty) {
            dateTime = dateTime.minus(1, ChronoField.MILLI_OF_SECOND.getBaseUnit());
        }
    }
    return dateTime.toInstant();
}
253652.91180elasticsearch
/**
 * Exercises the auto-sharding service's decrease-shards recommendations across
 * several scenarios: cooldown still in effect (no prior sharding event and a recent
 * one), cooldown elapsed (with and without a prior event), and a write load high
 * enough that no change is recommended.
 */
public void testCalculateDecreaseShardingRecommendations() {
    {
        // Data stream created "now" with no prior auto-sharding event: the decrease
        // cooldown (3 days) is measured from creation, so the decrease is prevented.
        Metadata.Builder builder = Metadata.builder();
        Function<DataStreamAutoShardingEvent, DataStream> dataStreamSupplier = (autoShardingEvent) -> createDataStream(builder, dataStreamName, 3, now, List.of(now - 10_000, now - 7000, now - 5000, now - 2000, now - 1000), getWriteLoad(3, 0.25), autoShardingEvent);
        DataStream dataStream = dataStreamSupplier.apply(null);
        builder.put(dataStream);
        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).nodeFeatures(Map.of("n1", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), "n2", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()))).metadata(builder).build();
        AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0);
        assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_DECREASE));
        assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueMillis(TimeValue.timeValueDays(3).millis() - 10_000)));
        assertThat(autoShardingResult.targetNumberOfShards(), is(1));
    }
    {
        // Data stream old enough (oldest backing index 21 days old, no sharding
        // event): cooldown has elapsed, so a decrease to 1 shard is recommended.
        Metadata.Builder builder = Metadata.builder();
        Function<DataStreamAutoShardingEvent, DataStream> dataStreamSupplier = (autoShardingEvent) -> createDataStream(builder, dataStreamName, 3, now, List.of(now - TimeValue.timeValueDays(21).getMillis(), now - TimeValue.timeValueDays(15).getMillis(), now - TimeValue.timeValueDays(4).getMillis(), now - TimeValue.timeValueDays(2).getMillis(), now - 1000), getWriteLoad(3, 0.333), autoShardingEvent);
        DataStream dataStream = dataStreamSupplier.apply(null);
        builder.put(dataStream);
        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).nodeFeatures(Map.of("n1", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), "n2", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()))).metadata(builder).build();
        AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0);
        assertThat(autoShardingResult.type(), is(DECREASE_SHARDS));
        assertThat(autoShardingResult.targetNumberOfShards(), is(1));
        assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO));
    }
    {
        // Prior auto-sharding event 4 days ago: still past the 3-day decrease
        // cooldown, so the decrease is allowed.
        Metadata.Builder builder = Metadata.builder();
        Function<DataStreamAutoShardingEvent, DataStream> dataStreamSupplier = (autoShardingEvent) -> createDataStream(builder, dataStreamName, 3, now, List.of(now - TimeValue.timeValueDays(21).getMillis(), now - TimeValue.timeValueDays(15).getMillis(), now - TimeValue.timeValueDays(4).getMillis(), now - TimeValue.timeValueDays(2).getMillis(), now - 1000), getWriteLoad(3, 0.333), autoShardingEvent);
        DataStream dataStream = dataStreamSupplier.apply(new DataStreamAutoShardingEvent(DataStream.getDefaultBackingIndexName(dataStreamName, 2), 2, now - TimeValue.timeValueDays(4).getMillis()));
        builder.put(dataStream);
        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).nodeFeatures(Map.of("n1", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), "n2", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()))).metadata(builder).build();
        AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0);
        assertThat(autoShardingResult.type(), is(DECREASE_SHARDS));
        assertThat(autoShardingResult.targetNumberOfShards(), is(1));
        assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO));
    }
    {
        // Prior auto-sharding event only 2 days ago: 1 day of the 3-day decrease
        // cooldown remains, so the decrease is prevented.
        Metadata.Builder builder = Metadata.builder();
        Function<DataStreamAutoShardingEvent, DataStream> dataStreamSupplier = (autoShardingEvent) -> createDataStream(builder, dataStreamName, 3, now, List.of(now - TimeValue.timeValueDays(21).getMillis(), now - TimeValue.timeValueDays(2).getMillis(), now - TimeValue.timeValueDays(1).getMillis(), now - 1000), getWriteLoad(3, 0.25), autoShardingEvent);
        DataStream dataStream = dataStreamSupplier.apply(new DataStreamAutoShardingEvent(DataStream.getDefaultBackingIndexName(dataStreamName, 2), 2, now - TimeValue.timeValueDays(2).getMillis()));
        builder.put(dataStream);
        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).nodeFeatures(Map.of("n1", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), "n2", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()))).metadata(builder).build();
        AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 1.0);
        assertThat(autoShardingResult.type(), is(COOLDOWN_PREVENTED_DECREASE));
        assertThat(autoShardingResult.targetNumberOfShards(), is(1));
        assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.timeValueDays(1)));
    }
    {
        // Write load high enough that 3 shards remain optimal: no change recommended.
        Metadata.Builder builder = Metadata.builder();
        Function<DataStreamAutoShardingEvent, DataStream> dataStreamSupplier = (autoShardingEvent) -> createDataStream(builder, dataStreamName, 3, now, List.of(now - TimeValue.timeValueDays(21).getMillis(), now - TimeValue.timeValueDays(15).getMillis(), now - TimeValue.timeValueDays(4).getMillis(), now - TimeValue.timeValueDays(2).getMillis(), now - 1000), getWriteLoad(3, 1.333), autoShardingEvent);
        DataStream dataStream = dataStreamSupplier.apply(null);
        builder.put(dataStream);
        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).nodeFeatures(Map.of("n1", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()), "n2", Set.of(DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE.id()))).metadata(builder).build();
        AutoShardingResult autoShardingResult = service.calculate(state, dataStream, 4.0);
        assertThat(autoShardingResult.type(), is(NO_CHANGE_REQUIRED));
        assertThat(autoShardingResult.targetNumberOfShards(), is(3));
        assertThat(autoShardingResult.coolDownRemaining(), is(TimeValue.ZERO));
    }
}
251071.6419215elasticsearch
/**
 * Verifies cross-cluster-search remote reduce behavior when some remote clusters are
 * disconnected: the search fails while skip_unavailable is false, succeeds with the
 * disconnected clusters marked SKIPPED once skip_unavailable is true, and succeeds
 * fully after connectivity is restored.
 */
public void testCCSRemoteReduceWithDisconnectedRemoteClusters() throws Exception {
    int numClusters = randomIntBetween(1, 10);
    DiscoveryNode[] nodes = new DiscoveryNode[numClusters];
    Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
    Settings.Builder builder = Settings.builder();
    MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false);
    Settings settings = builder.build();
    // Randomly include a local-cluster component in the search.
    boolean local = randomBoolean();
    OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null;
    int totalClusters = numClusters + (local ? 1 : 0);
    TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0);
    ResolvedIndices mockResolvedIndices = createMockResolvedIndices(localIndices, remoteIndicesByCluster);
    try (MockTransportService service = MockTransportService.createNewService(settings, VersionInformation.CURRENT, TransportVersion.current(), threadPool, null)) {
        service.start();
        service.acceptIncomingRequests();
        RemoteClusterService remoteClusterService = service.getRemoteClusterService();
        // Pick a random subset of remote clusters to disconnect; remember their indices
        // so skip_unavailable can be flipped for exactly those clusters later.
        int numDisconnectedClusters = randomIntBetween(1, numClusters);
        Set<DiscoveryNode> disconnectedNodes = Sets.newHashSetWithExpectedSize(numDisconnectedClusters);
        Set<Integer> disconnectedNodesIndices = Sets.newHashSetWithExpectedSize(numDisconnectedClusters);
        while (disconnectedNodes.size() < numDisconnectedClusters) {
            int i = randomIntBetween(0, numClusters - 1);
            if (disconnectedNodes.add(nodes[i])) {
                assertTrue(disconnectedNodesIndices.add(i));
            }
        }
        // Counts down once per disconnected node so the test can wait for the
        // connection manager to observe every disconnect.
        CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters);
        RemoteClusterServiceTests.addConnectionListener(remoteClusterService, new TransportConnectionListener() {

            @Override
            public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connection) {
                if (disconnectedNodes.remove(node)) {
                    disconnectedLatch.countDown();
                }
            }
        });
        for (DiscoveryNode disconnectedNode : disconnectedNodes) {
            service.addFailToSendNoConnectRule(disconnectedNode.getAddress());
        }
        {
            // Phase 1: skip_unavailable is false, so the whole search must fail with a
            // RemoteTransportException caused by the node disconnect.
            SearchRequest searchRequest = new SearchRequest();
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            AtomicReference<Exception> failure = new AtomicReference<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch);
            TaskId parentTaskId = new TaskId("n", 1);
            SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap());
            TransportSearchAction.ccsRemoteReduce(task, parentTaskId, searchRequest, mockResolvedIndices, new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), timeProvider, emptyReduceContextBuilder(), remoteClusterService, threadPool, listener, (r, l) -> setOnce.set(Tuple.tuple(r, l)));
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                // The local-cluster leg is dispatched through the provided callback.
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                resolveWithEmptySearchResponse(tuple);
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            assertNotNull(failure.get());
            assertThat(failure.get(), instanceOf(RemoteTransportException.class));
            assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster ["));
            assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class));
        }
        // Phase 2: mark the disconnected clusters skip_unavailable=true; the search
        // should now succeed with those clusters reported as SKIPPED.
        for (int i : disconnectedNodesIndices) {
            RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true);
        }
        {
            SearchRequest searchRequest = new SearchRequest();
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            SetOnce<SearchResponse> response = new SetOnce<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(newValue -> {
                // Keep the response alive past the listener callback; released in finally.
                newValue.mustIncRef();
                response.set(newValue);
            }), latch);
            Set<String> clusterAliases = new HashSet<>(remoteClusterService.getRegisteredRemoteClusterNames());
            if (localIndices != null) {
                clusterAliases.add("");
            }
            TaskId parentTaskId = new TaskId("n", 1);
            SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap());
            TransportSearchAction.ccsRemoteReduce(task, parentTaskId, searchRequest, mockResolvedIndices, new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), timeProvider, emptyReduceContextBuilder(), remoteClusterService, threadPool, listener, (r, l) -> setOnce.set(Tuple.tuple(r, l)));
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                resolveWithEmptySearchResponse(tuple);
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            SearchResponse searchResponse = response.get();
            try {
                assertEquals(disconnectedNodesIndices.size(), searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED));
                assertEquals(totalClusters, searchResponse.getClusters().getTotal());
                int successful = totalClusters - disconnectedNodesIndices.size();
                assertEquals(successful, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL));
                assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING));
                assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL));
                assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED));
                assertEquals(successful == 0 ? 0 : successful + 1, searchResponse.getNumReducePhases());
            } finally {
                searchResponse.decRef();
            }
        }
        // Phase 3: wait for all disconnects to be observed, restore connectivity, and
        // (randomly) re-flip skip_unavailable; the search should then fully succeed.
        assertTrue(disconnectedLatch.await(5, TimeUnit.SECONDS));
        service.clearAllRules();
        if (randomBoolean()) {
            for (int i : disconnectedNodesIndices) {
                if (randomBoolean()) {
                    RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true);
                }
            }
        }
        // assertBusy: reconnection is asynchronous, so retry until it completes.
        assertBusy(() -> {
            SearchRequest searchRequest = new SearchRequest();
            final CountDownLatch latch = new CountDownLatch(1);
            SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
            AtomicReference<SearchResponse> response = new AtomicReference<>();
            LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(newValue -> {
                newValue.mustIncRef();
                response.set(newValue);
            }), latch);
            Set<String> clusterAliases = new HashSet<>(remoteClusterService.getRegisteredRemoteClusterNames());
            if (localIndices != null) {
                clusterAliases.add("");
            }
            TaskId parentTaskId = new TaskId("n", 1);
            SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap());
            TransportSearchAction.ccsRemoteReduce(task, parentTaskId, searchRequest, mockResolvedIndices, new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), timeProvider, emptyReduceContextBuilder(), remoteClusterService, threadPool, listener, (r, l) -> setOnce.set(Tuple.tuple(r, l)));
            if (localIndices == null) {
                assertNull(setOnce.get());
            } else {
                Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
                assertEquals("", tuple.v1().getLocalClusterAlias());
                assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
                resolveWithEmptySearchResponse(tuple);
            }
            awaitLatch(latch, 5, TimeUnit.SECONDS);
            SearchResponse searchResponse = response.get();
            try {
                assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED));
                assertEquals(totalClusters, searchResponse.getClusters().getTotal());
                assertEquals(totalClusters, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL));
                assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING));
                assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL));
                assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED));
                assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases());
            } finally {
                searchResponse.decRef();
            }
        });
        assertEquals(0, service.getConnectionManager().size());
    } finally {
        for (MockTransportService mockTransportService : mockTransportServices) {
            mockTransportService.close();
        }
    }
}
253825.0217141elasticsearch
/**
 * Exercises the human-readable descriptions of the disk watermarks for every threshold
 * (low / high / flood-stage / frozen flood-stage). A small disk total (100b) makes the
 * percentage-based watermark the effective limit, while a very large total (1000tb)
 * makes the max-headroom byte limit kick in instead. When {@code includeKey} is true
 * each description is prefixed with the originating setting key; otherwise percentage
 * thresholds carry no prefix and headroom thresholds are prefixed with "max_headroom=".
 */
private void doTestDescriptions(boolean includeKey) {
    final ClusterSettings registry = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    final ByteSizeValue tinyTotal = ByteSizeValue.parseBytesSizeValue("100b", "test");
    final ByteSizeValue hugeTotal = ByteSizeValue.parseBytesSizeValue("1000tb", "test");
    // Expected description prefixes, one per watermark / max-headroom setting.
    final String lowPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey() + "=" : "";
    final String highPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey() + "=" : "";
    final String floodPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey() + "=" : "";
    final String frozenFloodPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_WATERMARK_SETTING.getKey() + "=" : "";
    final String lowHeadroomPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey() + "=" : "max_headroom=";
    final String highHeadroomPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey() + "=" : "max_headroom=";
    final String floodHeadroomPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey() + "=" : "max_headroom=";
    final String frozenFloodHeadroomPrefix = includeKey ? DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_MAX_HEADROOM_SETTING.getKey() + "=" : "max_headroom=";

    // Defaults: 85%/90%/95%/95% watermarks with 200gb/150gb/100gb/20gb max headrooms.
    DiskThresholdSettings thresholds = new DiskThresholdSettings(Settings.EMPTY, registry);
    assertThat(thresholds.describeLowThreshold(tinyTotal, includeKey), equalTo(lowPrefix + "85%"));
    assertThat(thresholds.describeHighThreshold(tinyTotal, includeKey), equalTo(highPrefix + "90%"));
    assertThat(thresholds.describeFloodStageThreshold(tinyTotal, includeKey), equalTo(floodPrefix + "95%"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(tinyTotal, includeKey), equalTo(frozenFloodPrefix + "95%"));
    assertThat(thresholds.describeLowThreshold(hugeTotal, includeKey), equalTo(lowHeadroomPrefix + "200gb"));
    assertThat(thresholds.describeHighThreshold(hugeTotal, includeKey), equalTo(highHeadroomPrefix + "150gb"));
    assertThat(thresholds.describeFloodStageThreshold(hugeTotal, includeKey), equalTo(floodHeadroomPrefix + "100gb"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(hugeTotal, includeKey), equalTo(frozenFloodHeadroomPrefix + "20gb"));

    // Explicit percentage watermarks (given as percent or ratio at random): no headroom applies,
    // so the percentage description is used for both disk sizes.
    final Settings percentOnly = Settings.builder()
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), randomBoolean() ? "91.2%" : "0.912")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), randomBoolean() ? "91.3%" : "0.913")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), randomBoolean() ? "91.4%" : "0.914")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_WATERMARK_SETTING.getKey(), randomBoolean() ? "91.5%" : "0.915")
        .build();
    thresholds = new DiskThresholdSettings(percentOnly, registry);
    assertThat(thresholds.describeLowThreshold(tinyTotal, includeKey), equalTo(lowPrefix + "91.2%"));
    assertThat(thresholds.describeHighThreshold(tinyTotal, includeKey), equalTo(highPrefix + "91.3%"));
    assertThat(thresholds.describeFloodStageThreshold(tinyTotal, includeKey), equalTo(floodPrefix + "91.4%"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(tinyTotal, includeKey), equalTo(frozenFloodPrefix + "91.5%"));
    assertThat(thresholds.describeLowThreshold(hugeTotal, includeKey), equalTo(lowPrefix + "91.2%"));
    assertThat(thresholds.describeHighThreshold(hugeTotal, includeKey), equalTo(highPrefix + "91.3%"));
    assertThat(thresholds.describeFloodStageThreshold(hugeTotal, includeKey), equalTo(floodPrefix + "91.4%"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(hugeTotal, includeKey), equalTo(frozenFloodPrefix + "91.5%"));

    // Explicit absolute watermarks: described verbatim (lower-cased) regardless of disk size.
    final Settings absoluteOnly = Settings.builder()
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1GB")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "10MB")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "2B")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_WATERMARK_SETTING.getKey(), "1B")
        .build();
    thresholds = new DiskThresholdSettings(absoluteOnly, registry);
    assertThat(thresholds.describeLowThreshold(tinyTotal, includeKey), equalTo(lowPrefix + "1gb"));
    assertThat(thresholds.describeHighThreshold(tinyTotal, includeKey), equalTo(highPrefix + "10mb"));
    assertThat(thresholds.describeFloodStageThreshold(tinyTotal, includeKey), equalTo(floodPrefix + "2b"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(tinyTotal, includeKey), equalTo(frozenFloodPrefix + "1b"));
    assertThat(thresholds.describeLowThreshold(hugeTotal, includeKey), equalTo(lowPrefix + "1gb"));
    assertThat(thresholds.describeHighThreshold(hugeTotal, includeKey), equalTo(highPrefix + "10mb"));
    assertThat(thresholds.describeFloodStageThreshold(hugeTotal, includeKey), equalTo(floodPrefix + "2b"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(hugeTotal, includeKey), equalTo(frozenFloodPrefix + "1b"));

    // Percentage watermarks combined with explicit max headrooms: the percentage wins on the
    // small disk, the headroom wins on the huge disk.
    final Settings percentWithHeadroom = Settings.builder()
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), randomBoolean() ? "31.2%" : "0.312")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), randomBoolean() ? "31.3%" : "0.313")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), randomBoolean() ? "31.4%" : "0.314")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_WATERMARK_SETTING.getKey(), randomBoolean() ? "31.5%" : "0.315")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), "100gb")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey(), "50gb")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey(), "10gb")
        .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_FROZEN_MAX_HEADROOM_SETTING.getKey(), "10gb")
        .build();
    thresholds = new DiskThresholdSettings(percentWithHeadroom, registry);
    assertThat(thresholds.describeLowThreshold(tinyTotal, includeKey), equalTo(lowPrefix + "31.2%"));
    assertThat(thresholds.describeHighThreshold(tinyTotal, includeKey), equalTo(highPrefix + "31.3%"));
    assertThat(thresholds.describeFloodStageThreshold(tinyTotal, includeKey), equalTo(floodPrefix + "31.4%"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(tinyTotal, includeKey), equalTo(frozenFloodPrefix + "31.5%"));
    assertThat(thresholds.describeLowThreshold(hugeTotal, includeKey), equalTo(lowHeadroomPrefix + "100gb"));
    assertThat(thresholds.describeHighThreshold(hugeTotal, includeKey), equalTo(highHeadroomPrefix + "50gb"));
    assertThat(thresholds.describeFloodStageThreshold(hugeTotal, includeKey), equalTo(floodHeadroomPrefix + "10gb"));
    assertThat(thresholds.describeFrozenFloodStageThreshold(hugeTotal, includeKey), equalTo(frozenFloodHeadroomPrefix + "10gb"));
}
257100.08130elasticsearch
/**
 * Starts a 3-shard / 1-replica index on three nodes, verifies allocation converges to two
 * started shards per node, then adds a second identical index and verifies the cluster
 * rebalances to four started shards per node (two per index).
 *
 * The original verification loops were duplicated verbatim for "test" and "test1"; they are
 * factored into private helpers below, with assertions unchanged.
 */
public void testBalanceAllNodesStartedAddIndex() {
    AllocationService strategy = createAllocationService(
        Settings.builder()
            .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
            .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
            .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
            .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
            .build()
    );
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder()
        .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(3).numberOfReplicas(1))
        .build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY)
        .addAsNew(metadata.index("test"))
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    // With no nodes yet, all six shard copies (3 primaries + 3 replicas) are unassigned.
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        var shardTable = clusterState.routingTable().index("test").shard(i);
        assertThat(shardTable.size(), equalTo(2));
        assertThat(shardTable.shard(0).state(), equalTo(UNASSIGNED));
        assertThat(shardTable.shard(1).state(), equalTo(UNASSIGNED));
        assertThat(shardTable.shard(0).currentNodeId(), nullValue());
        assertThat(shardTable.shard(1).currentNodeId(), nullValue());
    }
    logger.info("Adding three node and performing rerouting");
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")))
        .build();
    clusterState = rerouteAndExpectChange(strategy, clusterState);
    assertPrimariesInitializingReplicasUnassigned(clusterState, "test");
    logger.info("Another round of rebalancing");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
    assertRerouteIsNoOp(strategy, clusterState);
    clusterState = startShardsAndExpectChange(strategy, clusterState);
    assertPrimariesStartedReplicasInitializing(clusterState, "test");
    logger.info("Reroute, nothing should change");
    assertRerouteIsNoOp(strategy, clusterState);
    logger.info("Start the more shards");
    clusterState = startShardsAndExpectChange(strategy, clusterState);
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    assertAllShardsStarted(clusterState, "test");
    // 6 shards over 3 nodes: 2 started shards per node, all belonging to "test".
    assertStartedShardsPerNode(routingNodes, "test", 2, 2L);
    logger.info("Add new index 3 shards 1 replica");
    Metadata updatedMetadata = Metadata.builder(clusterState.metadata())
        .put(IndexMetadata.builder("test1").settings(indexSettings(IndexVersion.current(), 3, 1)))
        .build();
    RoutingTable updatedRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState.routingTable())
        .addAsNew(updatedMetadata.index("test1"))
        .build();
    clusterState = ClusterState.builder(clusterState).metadata(updatedMetadata).routingTable(updatedRoutingTable).build();
    assertThat(clusterState.routingTable().index("test1").size(), equalTo(3));
    clusterState = rerouteAndExpectChange(strategy, clusterState);
    assertPrimariesInitializingReplicasUnassigned(clusterState, "test1");
    logger.info("Another round of rebalancing");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
    assertRerouteIsNoOp(strategy, clusterState);
    clusterState = startShardsAndExpectChange(strategy, clusterState);
    assertPrimariesStartedReplicasInitializing(clusterState, "test1");
    logger.info("Reroute, nothing should change");
    assertRerouteIsNoOp(strategy, clusterState);
    logger.info("Start the more shards");
    clusterState = startShardsAndExpectChange(strategy, clusterState);
    routingNodes = clusterState.getRoutingNodes();
    assertAllShardsStarted(clusterState, "test1");
    // 12 shards over 3 nodes: 4 started shards per node, 2 of which belong to "test1".
    assertStartedShardsPerNode(routingNodes, "test1", 4, 2L);
}

/** Reroutes and asserts that the allocation actually changed the cluster state. */
private static ClusterState rerouteAndExpectChange(AllocationService strategy, ClusterState clusterState) {
    ClusterState newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, not(equalTo(clusterState)));
    return newState;
}

/** Starts all initializing shards and asserts that doing so changed the cluster state. */
private static ClusterState startShardsAndExpectChange(AllocationService strategy, ClusterState clusterState) {
    ClusterState newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    return newState;
}

/** Asserts that rerouting the given state is a no-op. */
private static void assertRerouteIsNoOp(AllocationService strategy, ClusterState clusterState) {
    assertThat(strategy.reroute(clusterState, "reroute", ActionListener.noop()), equalTo(clusterState));
}

/** Asserts each of the 3 shards of {@code index} has an initializing primary and one unassigned replica. */
private static void assertPrimariesInitializingReplicasUnassigned(ClusterState clusterState, String index) {
    var indexRoutingTable = clusterState.routingTable().index(index);
    assertThat(indexRoutingTable.size(), equalTo(3));
    for (int i = 0; i < indexRoutingTable.size(); i++) {
        assertThat(indexRoutingTable.shard(i).size(), equalTo(2));
        assertThat(indexRoutingTable.shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(indexRoutingTable.shard(i).replicaShards().size(), equalTo(1));
        assertThat(indexRoutingTable.shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
        assertThat(indexRoutingTable.shard(i).replicaShards().get(0).currentNodeId(), nullValue());
    }
}

/** Asserts each of the 3 shards of {@code index} has a started primary and one initializing replica. */
private static void assertPrimariesStartedReplicasInitializing(ClusterState clusterState, String index) {
    var indexRoutingTable = clusterState.routingTable().index(index);
    assertThat(indexRoutingTable.size(), equalTo(3));
    for (int i = 0; i < indexRoutingTable.size(); i++) {
        assertThat(indexRoutingTable.shard(i).size(), equalTo(2));
        assertThat(indexRoutingTable.shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(indexRoutingTable.shard(i).replicaShards().size(), equalTo(1));
        assertThat(indexRoutingTable.shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
    }
}

/** Asserts each of the 3 shards of {@code index} has a started primary and a single replica. */
private static void assertAllShardsStarted(ClusterState clusterState, String index) {
    var indexRoutingTable = clusterState.routingTable().index(index);
    assertThat(indexRoutingTable.size(), equalTo(3));
    for (int i = 0; i < indexRoutingTable.size(); i++) {
        assertThat(indexRoutingTable.shard(i).size(), equalTo(2));
        assertThat(indexRoutingTable.shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(indexRoutingTable.shard(i).replicaShards().size(), equalTo(1));
    }
}

/** Asserts each of the three nodes holds the expected number of started shards, in total and for {@code index}. */
private static void assertStartedShardsPerNode(RoutingNodes routingNodes, String index, int totalStarted, long startedForIndex) {
    for (String nodeId : new String[] { "node1", "node2", "node3" }) {
        assertThat(routingNodes.node(nodeId).numberOfShardsWithState(STARTED), equalTo(totalStarted));
        assertThat(routingNodes.node(nodeId).shardsWithState(index, STARTED).count(), equalTo(startedForIndex));
    }
}
254180.481185elasticsearch
/**
 * Verifies the {@code strict_*} date formats: fully zero-padded ISO-style values parse,
 * while lenient forms (missing zero padding, single-digit fields, two-digit years,
 * five-digit years, out-of-range week days) are rejected with a parse exception.
 * Grouped below by format name.
 */
public void testStrictParsing() {
    // strict_basic_week_date: yyyy'W'wwe, four-digit year required
    assertParses("2018W313", "strict_basic_week_date");
    assertParseException("18W313", "strict_basic_week_date");
    // strict_basic_week_date_time: accepts 1-9 fractional digits and Z / +HHMM / +HH:MM offsets,
    // rejects unpadded hour/minute/second fields
    assertParses("2018W313T121212.1Z", "strict_basic_week_date_time");
    assertParses("2018W313T121212.123Z", "strict_basic_week_date_time");
    assertParses("2018W313T121212.123456789Z", "strict_basic_week_date_time");
    assertParses("2018W313T121212.1+0100", "strict_basic_week_date_time");
    assertParses("2018W313T121212.123+0100", "strict_basic_week_date_time");
    assertParses("2018W313T121212.1+01:00", "strict_basic_week_date_time");
    assertParses("2018W313T121212.123+01:00", "strict_basic_week_date_time");
    assertParseException("2018W313T12128.123Z", "strict_basic_week_date_time");
    assertParseException("2018W313T12128.123456789Z", "strict_basic_week_date_time");
    assertParseException("2018W313T81212.123Z", "strict_basic_week_date_time");
    assertParseException("2018W313T12812.123Z", "strict_basic_week_date_time");
    assertParseException("2018W313T12812.1Z", "strict_basic_week_date_time");
    // strict_basic_week_date_time_no_millis: same, but no fractional seconds
    assertParses("2018W313T121212Z", "strict_basic_week_date_time_no_millis");
    assertParses("2018W313T121212+0100", "strict_basic_week_date_time_no_millis");
    assertParses("2018W313T121212+01:00", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T12128Z", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T12128+0100", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T12128+01:00", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T81212Z", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T81212+0100", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T81212+01:00", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T12812Z", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T12812+0100", "strict_basic_week_date_time_no_millis");
    assertParseException("2018W313T12812+01:00", "strict_basic_week_date_time_no_millis");
    // strict_date: yyyy-MM-dd, four-digit year and two-digit month required
    assertParses("2018-12-31", "strict_date");
    assertParseException("10000-12-31", "strict_date");
    assertParseException("2018-8-31", "strict_date");
    // strict_date_hour
    assertParses("2018-12-31T12", "strict_date_hour");
    assertParseException("2018-12-31T8", "strict_date_hour");
    // strict_date_hour_minute
    assertParses("2018-12-31T12:12", "strict_date_hour_minute");
    assertParseException("2018-12-31T8:3", "strict_date_hour_minute");
    // strict_date_hour_minute_second
    assertParses("2018-12-31T12:12:12", "strict_date_hour_minute_second");
    assertParseException("2018-12-31T12:12:1", "strict_date_hour_minute_second");
    // strict_date_hour_minute_second_fraction / _millis: both accept a short or long
    // fraction, both require the fraction to be present
    assertParses("2018-12-31T12:12:12.1", "strict_date_hour_minute_second_fraction");
    assertParses("2018-12-31T12:12:12.123", "strict_date_hour_minute_second_fraction");
    assertParses("2018-12-31T12:12:12.123456789", "strict_date_hour_minute_second_fraction");
    assertParses("2018-12-31T12:12:12.123", "strict_date_hour_minute_second_millis");
    assertParses("2018-12-31T12:12:12.1", "strict_date_hour_minute_second_millis");
    // NOTE(review): duplicate of the ".1" fraction assertion three lines above
    assertParses("2018-12-31T12:12:12.1", "strict_date_hour_minute_second_fraction");
    assertParseException("2018-12-31T12:12:12", "strict_date_hour_minute_second_millis");
    assertParseException("2018-12-31T12:12:12", "strict_date_hour_minute_second_fraction");
    // strict_date_optional_time: date alone or full date-time, all fields zero-padded
    assertParses("2018-12-31", "strict_date_optional_time");
    assertParseException("2018-12-1", "strict_date_optional_time");
    assertParseException("2018-1-31", "strict_date_optional_time");
    assertParseException("10000-01-31", "strict_date_optional_time");
    assertParses("2010-01-05T02:00", "strict_date_optional_time");
    assertParses("2018-12-31T10:15:30", "strict_date_optional_time");
    assertParses("2018-12-31T10:15:30Z", "strict_date_optional_time");
    assertParses("2018-12-31T10:15:30+0100", "strict_date_optional_time");
    assertParses("2018-12-31T10:15:30+01:00", "strict_date_optional_time");
    assertParseException("2018-12-31T10:15:3", "strict_date_optional_time");
    assertParseException("2018-12-31T10:5:30", "strict_date_optional_time");
    assertParseException("2018-12-31T9:15:30", "strict_date_optional_time");
    assertParses("2015-01-04T00:00Z", "strict_date_optional_time");
    // strict_date_time: mandatory fraction, 1-9 digits
    assertParses("2018-12-31T10:15:30.1Z", "strict_date_time");
    assertParses("2018-12-31T10:15:30.123Z", "strict_date_time");
    assertParses("2018-12-31T10:15:30.123456789Z", "strict_date_time");
    assertParses("2018-12-31T10:15:30.1+0100", "strict_date_time");
    assertParses("2018-12-31T10:15:30.123+0100", "strict_date_time");
    assertParses("2018-12-31T10:15:30.1+01:00", "strict_date_time");
    assertParses("2018-12-31T10:15:30.123+01:00", "strict_date_time");
    assertParses("2018-12-31T10:15:30.11Z", "strict_date_time");
    assertParses("2018-12-31T10:15:30.11+0100", "strict_date_time");
    assertParses("2018-12-31T10:15:30.11+01:00", "strict_date_time");
    assertParseException("2018-12-31T10:15:3.123Z", "strict_date_time");
    assertParseException("2018-12-31T10:5:30.123Z", "strict_date_time");
    assertParseException("2018-12-31T1:15:30.123Z", "strict_date_time");
    // strict_date_time_no_millis
    assertParses("2018-12-31T10:15:30Z", "strict_date_time_no_millis");
    assertParses("2018-12-31T10:15:30+0100", "strict_date_time_no_millis");
    assertParses("2018-12-31T10:15:30+01:00", "strict_date_time_no_millis");
    assertParseException("2018-12-31T10:5:30Z", "strict_date_time_no_millis");
    assertParseException("2018-12-31T10:15:3Z", "strict_date_time_no_millis");
    assertParseException("2018-12-31T1:15:30Z", "strict_date_time_no_millis");
    // strict_hour / strict_hour_minute / strict_hour_minute_second: two-digit fields only
    assertParses("12", "strict_hour");
    assertParses("01", "strict_hour");
    assertParseException("1", "strict_hour");
    assertParses("12:12", "strict_hour_minute");
    assertParses("12:01", "strict_hour_minute");
    assertParseException("12:1", "strict_hour_minute");
    assertParses("12:12:12", "strict_hour_minute_second");
    assertParses("12:12:01", "strict_hour_minute_second");
    assertParseException("12:12:1", "strict_hour_minute_second");
    // strict_hour_minute_second_fraction / _millis: fraction mandatory
    assertParses("12:12:12.123", "strict_hour_minute_second_fraction");
    assertParses("12:12:12.123456789", "strict_hour_minute_second_fraction");
    assertParses("12:12:12.1", "strict_hour_minute_second_fraction");
    assertParseException("12:12:12", "strict_hour_minute_second_fraction");
    assertParses("12:12:12.123", "strict_hour_minute_second_millis");
    assertParses("12:12:12.1", "strict_hour_minute_second_millis");
    assertParseException("12:12:12", "strict_hour_minute_second_millis");
    // strict_ordinal_date: yyyy-DDD (day of year)
    assertParses("2018-128", "strict_ordinal_date");
    assertParseException("2018-1", "strict_ordinal_date");
    // strict_ordinal_date_time
    assertParses("2018-128T10:15:30.1Z", "strict_ordinal_date_time");
    assertParses("2018-128T10:15:30.123Z", "strict_ordinal_date_time");
    assertParses("2018-128T10:15:30.123456789Z", "strict_ordinal_date_time");
    assertParses("2018-128T10:15:30.1+0100", "strict_ordinal_date_time");
    assertParses("2018-128T10:15:30.123+0100", "strict_ordinal_date_time");
    assertParses("2018-128T10:15:30.1+01:00", "strict_ordinal_date_time");
    assertParses("2018-128T10:15:30.123+01:00", "strict_ordinal_date_time");
    assertParseException("2018-1T10:15:30.123Z", "strict_ordinal_date_time");
    // strict_ordinal_date_time_no_millis
    assertParses("2018-128T10:15:30Z", "strict_ordinal_date_time_no_millis");
    assertParses("2018-128T10:15:30+0100", "strict_ordinal_date_time_no_millis");
    assertParses("2018-128T10:15:30+01:00", "strict_ordinal_date_time_no_millis");
    assertParseException("2018-1T10:15:30Z", "strict_ordinal_date_time_no_millis");
    // strict_time: offset and fraction mandatory
    assertParses("10:15:30.1Z", "strict_time");
    assertParses("10:15:30.123Z", "strict_time");
    assertParses("10:15:30.123456789Z", "strict_time");
    assertParses("10:15:30.123+0100", "strict_time");
    assertParses("10:15:30.123+01:00", "strict_time");
    assertParseException("1:15:30.123Z", "strict_time");
    assertParseException("10:1:30.123Z", "strict_time");
    assertParseException("10:15:3.123Z", "strict_time");
    assertParseException("10:15:3.1", "strict_time");
    assertParseException("10:15:3Z", "strict_time");
    // strict_time_no_millis: offset mandatory, no fraction
    assertParses("10:15:30Z", "strict_time_no_millis");
    assertParses("10:15:30+0100", "strict_time_no_millis");
    assertParses("10:15:30+01:00", "strict_time_no_millis");
    assertParses("01:15:30Z", "strict_time_no_millis");
    assertParses("01:15:30+0100", "strict_time_no_millis");
    assertParses("01:15:30+01:00", "strict_time_no_millis");
    assertParseException("1:15:30Z", "strict_time_no_millis");
    assertParseException("10:5:30Z", "strict_time_no_millis");
    assertParseException("10:15:3Z", "strict_time_no_millis");
    assertParseException("10:15:3", "strict_time_no_millis");
    // strict_t_time: leading 'T' plus strict_time rules
    assertParses("T10:15:30.1Z", "strict_t_time");
    assertParses("T10:15:30.123Z", "strict_t_time");
    assertParses("T10:15:30.123456789Z", "strict_t_time");
    assertParses("T10:15:30.1+0100", "strict_t_time");
    assertParses("T10:15:30.123+0100", "strict_t_time");
    assertParses("T10:15:30.1+01:00", "strict_t_time");
    assertParses("T10:15:30.123+01:00", "strict_t_time");
    assertParseException("T1:15:30.123Z", "strict_t_time");
    assertParseException("T10:1:30.123Z", "strict_t_time");
    assertParseException("T10:15:3.123Z", "strict_t_time");
    assertParseException("T10:15:3.1", "strict_t_time");
    assertParseException("T10:15:3Z", "strict_t_time");
    // strict_t_time_no_millis
    assertParses("T10:15:30Z", "strict_t_time_no_millis");
    assertParses("T10:15:30+0100", "strict_t_time_no_millis");
    assertParses("T10:15:30+01:00", "strict_t_time_no_millis");
    assertParseException("T1:15:30Z", "strict_t_time_no_millis");
    assertParseException("T10:1:30Z", "strict_t_time_no_millis");
    assertParseException("T10:15:3Z", "strict_t_time_no_millis");
    assertParseException("T10:15:3", "strict_t_time_no_millis");
    // strict_week_date: yyyy-'W'ww-e; two-digit week, day-of-week 1-7
    assertParses("2012-W48-6", "strict_week_date");
    assertParses("2012-W01-6", "strict_week_date");
    assertParseException("2012-W1-6", "strict_week_date");
    assertParseException("2012-W1-8", "strict_week_date");
    // NOTE(review): the next four assertions repeat the group above, with "2012-W01-8"
    // additionally covering an out-of-range day on a padded week number
    assertParses("2012-W48-6", "strict_week_date");
    assertParses("2012-W01-6", "strict_week_date");
    assertParseException("2012-W1-6", "strict_week_date");
    assertParseException("2012-W01-8", "strict_week_date");
    // strict_week_date_time
    assertParses("2012-W48-6T10:15:30.1Z", "strict_week_date_time");
    assertParses("2012-W48-6T10:15:30.123Z", "strict_week_date_time");
    assertParses("2012-W48-6T10:15:30.123456789Z", "strict_week_date_time");
    assertParses("2012-W48-6T10:15:30.1+0100", "strict_week_date_time");
    assertParses("2012-W48-6T10:15:30.123+0100", "strict_week_date_time");
    assertParses("2012-W48-6T10:15:30.1+01:00", "strict_week_date_time");
    assertParses("2012-W48-6T10:15:30.123+01:00", "strict_week_date_time");
    assertParseException("2012-W1-6T10:15:30.123Z", "strict_week_date_time");
    // strict_week_date_time_no_millis
    assertParses("2012-W48-6T10:15:30Z", "strict_week_date_time_no_millis");
    assertParses("2012-W48-6T10:15:30+0100", "strict_week_date_time_no_millis");
    assertParses("2012-W48-6T10:15:30+01:00", "strict_week_date_time_no_millis");
    assertParseException("2012-W1-6T10:15:30Z", "strict_week_date_time_no_millis");
    // strict_year / strict_year_month / strict_year_month_day
    assertParses("2012", "strict_year");
    assertParseException("1", "strict_year");
    assertParses("-2000", "strict_year");
    assertParses("2012-12", "strict_year_month");
    assertParseException("1-1", "strict_year_month");
    assertParses("2012-12-31", "strict_year_month_day");
    assertParseException("1-12-31", "strict_year_month_day");
    assertParseException("2012-1-31", "strict_year_month_day");
    assertParseException("2012-12-1", "strict_year_month_day");
    // strict_weekyear
    assertParses("2018", "strict_weekyear");
    assertParseException("1", "strict_weekyear");
    // NOTE(review): "2018" and "1" are re-asserted here, duplicating the two lines above
    assertParses("2018", "strict_weekyear");
    assertParses("2017", "strict_weekyear");
    assertParseException("1", "strict_weekyear");
    // strict_weekyear_week / strict_weekyear_week_day
    assertParses("2018-W29", "strict_weekyear_week");
    assertParses("2018-W01", "strict_weekyear_week");
    assertParseException("2018-W1", "strict_weekyear_week");
    assertParses("2012-W31-5", "strict_weekyear_week_day");
    assertParseException("2012-W1-1", "strict_weekyear_week_day");
}
252946.361208elasticsearch
// Verifies that every dynamic merge-policy index setting is picked up by
// updateIndexMetadata and reflected by both policies — the casts show that
// getMergePolicy(false) yields a TieredMergePolicy and getMergePolicy(true) a
// LogByteSizeMergePolicy (presumably the time-based variant — confirm) — and
// that resetting to Settings.EMPTY restores every default.
public void testTieredMergePolicySettingsUpdate() {
    IndexSettings indexSettings = indexSettings(Settings.EMPTY);
    // expunge_deletes_allowed: default, then default + 1 after the update.
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
    // floor_segment feeds the tiered policy's floorSegmentMB and the
    // log-byte-size policy's minMergeMB.
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);
    // max_merge_at_once only affects the tiered policy.
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1);
    // max_merged_segment: note the two policies have different defaults
    // (DEFAULT_MAX_MERGED_SEGMENT vs DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT)
    // but the same explicit 8gb value applies to both after the update.
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT.getMbFrac(), 0.0001);
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), ByteSizeValue.ofGb(8)).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), ByteSizeValue.ofGb(8).getMbFrac(), 0.0001);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), ByteSizeValue.ofGb(8).getMbFrac(), 0.0001);
    // segments_per_tier (tiered) and merge_factor (log-byte-size) are
    // independent settings, each updated and asserted separately.
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), MergePolicyConfig.DEFAULT_MERGE_FACTOR, 0);
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), MergePolicyConfig.DEFAULT_MERGE_FACTOR + 1).build()));
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), MergePolicyConfig.DEFAULT_MERGE_FACTOR + 1, 0);
    // deletes_pct_allowed: 22 is accepted, 53 is rejected with a validation
    // error whose cause mentions the 50.0 upper bound.
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build()));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), 22, 0);
    IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build())));
    final Throwable cause = exc.getCause();
    assertThat(cause.getMessage(), containsString("must be <= 50.0"));
    // Clearing all settings must bring every knob back to its default.
    indexSettings.updateIndexMetadata(newIndexMeta("index", Settings.EMPTY));
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), MergePolicyConfig.DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT.getMbFrac(), 0.0001);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
    assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), MergePolicyConfig.DEFAULT_MERGE_FACTOR, 0);
    assertEquals(((TieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
}
255101.8614136elasticsearch
// Builds a RecoveryResponse backed by mocked RecoveryState instances and checks
// that RestCatRecoveryAction renders one table row per successful shard, with
// every column value taken from the corresponding RecoveryState getter.
public void testRestRecoveryAction() {
    final RestCatRecoveryAction action = new RestCatRecoveryAction();
    final int totalShards = randomIntBetween(1, 32);
    final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2));
    final int failedShards = totalShards - successfulShards;
    final Map<String, List<RecoveryState>> shardRecoveryStates = new HashMap<>();
    final List<RecoveryState> recoveryStates = new ArrayList<>();
    for (int i = 0; i < successfulShards; i++) {
        final RecoveryState state = mock(RecoveryState.class);
        when(state.getShardId()).thenReturn(new ShardId(new Index("index", "_na_"), i));
        // Timer: stopTime is kept consistent with startTime + elapsed time.
        final RecoveryState.Timer timer = mock(RecoveryState.Timer.class);
        final long startTime = randomLongBetween(0, new Date().getTime());
        when(timer.startTime()).thenReturn(startTime);
        final long time = randomLongBetween(1000000, 10 * 1000000);
        when(timer.time()).thenReturn(time);
        when(timer.stopTime()).thenReturn(startTime + time);
        when(state.getTimer()).thenReturn(timer);
        when(state.getRecoverySource()).thenReturn(TestShardRouting.buildRecoverySource());
        when(state.getStage()).thenReturn(randomFrom(RecoveryState.Stage.values()));
        // The source node is optional ("n/a" columns when absent).
        final DiscoveryNode sourceNode = randomBoolean() ? mock(DiscoveryNode.class) : null;
        if (sourceNode != null) {
            when(sourceNode.getHostName()).thenReturn(randomAlphaOfLength(8));
        }
        when(state.getSourceNode()).thenReturn(sourceNode);
        final DiscoveryNode targetNode = mock(DiscoveryNode.class);
        when(targetNode.getHostName()).thenReturn(randomAlphaOfLength(8));
        when(state.getTargetNode()).thenReturn(targetNode);
        // File progress: recovered <= totalRecover <= totalFileCount.
        RecoveryState.Index index = mock(RecoveryState.Index.class);
        final int totalRecoveredFiles = randomIntBetween(1, 64);
        when(index.totalRecoverFiles()).thenReturn(totalRecoveredFiles);
        final int recoveredFileCount = randomIntBetween(0, totalRecoveredFiles);
        when(index.recoveredFileCount()).thenReturn(recoveredFileCount);
        when(index.recoveredFilesPercent()).thenReturn((100f * recoveredFileCount) / totalRecoveredFiles);
        when(index.totalFileCount()).thenReturn(randomIntBetween(totalRecoveredFiles, 2 * totalRecoveredFiles));
        // Byte progress, mirroring the file-count section above.
        final int totalRecoveredBytes = randomIntBetween(1, 1 << 24);
        when(index.totalRecoverBytes()).thenReturn((long) totalRecoveredBytes);
        final int recoveredBytes = randomIntBetween(0, totalRecoveredBytes);
        when(index.recoveredBytes()).thenReturn((long) recoveredBytes);
        when(index.recoveredBytesPercent()).thenReturn((100f * recoveredBytes) / totalRecoveredBytes);
        // BUGFIX: this previously re-stubbed totalRecoverBytes(), which in
        // Mockito silently replaces the stub three lines up and leaves
        // totalBytes() (read by the "bytes_total" column below) unstubbed at
        // its default of 0. Stub totalBytes() instead, matching totalFileCount().
        when(index.totalBytes()).thenReturn((long) randomIntBetween(totalRecoveredBytes, 2 * totalRecoveredBytes));
        when(state.getIndex()).thenReturn(index);
        // Translog progress; percent is defined as 100 when there are no ops.
        final RecoveryState.Translog translog = mock(RecoveryState.Translog.class);
        final int translogOps = randomIntBetween(0, 1 << 18);
        when(translog.totalOperations()).thenReturn(translogOps);
        final int translogOpsRecovered = randomIntBetween(0, translogOps);
        when(translog.recoveredOperations()).thenReturn(translogOpsRecovered);
        when(translog.recoveredPercent()).thenReturn(translogOps == 0 ? 100f : (100f * translogOpsRecovered / translogOps));
        when(state.getTranslog()).thenReturn(translog);
        recoveryStates.add(state);
    }
    // Shuffle the per-index list to verify the action's own row ordering.
    final List<RecoveryState> shuffle = new ArrayList<>(recoveryStates);
    Randomness.shuffle(shuffle);
    shardRecoveryStates.put("index", shuffle);
    final List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>();
    final RecoveryResponse response = new RecoveryResponse(totalShards, successfulShards, failedShards, shardRecoveryStates, shardFailures);
    final Table table = action.buildRecoveryTable(null, response);
    assertNotNull(table);
    // Header names and order are part of the cat API contract.
    List<Table.Cell> headers = table.getHeaders();
    final List<String> expectedHeaders = Arrays.asList("index", "shard", "start_time", "start_time_millis", "stop_time", "stop_time_millis", "time", "type", "stage", "source_host", "source_node", "target_host", "target_node", "repository", "snapshot", "files", "files_recovered", "files_percent", "files_total", "bytes", "bytes_recovered", "bytes_percent", "bytes_total", "translog_ops", "translog_ops_recovered", "translog_ops_percent");
    for (int i = 0; i < expectedHeaders.size(); i++) {
        assertThat(headers.get(i).value, equalTo(expectedHeaders.get(i)));
    }
    assertThat(table.getRows().size(), equalTo(successfulShards));
    for (int i = 0; i < successfulShards; i++) {
        final RecoveryState state = recoveryStates.get(i);
        final List<Object> expectedValues = Arrays.asList("index", i, XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().startTime())), state.getTimer().startTime(), XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(state.getTimer().stopTime())), state.getTimer().stopTime(), new TimeValue(state.getTimer().time()), state.getRecoverySource().getType().name().toLowerCase(Locale.ROOT), state.getStage().name().toLowerCase(Locale.ROOT), state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName(), state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName(), state.getTargetNode().getHostName(), state.getTargetNode().getName(), state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? "n/a" : ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getRepository(), state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? "n/a" : ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getSnapshotId().getName(), state.getIndex().totalRecoverFiles(), state.getIndex().recoveredFileCount(), percent(state.getIndex().recoveredFilesPercent()), state.getIndex().totalFileCount(), ByteSizeValue.ofBytes(state.getIndex().totalRecoverBytes()), ByteSizeValue.ofBytes(state.getIndex().recoveredBytes()), percent(state.getIndex().recoveredBytesPercent()), ByteSizeValue.ofBytes(state.getIndex().totalBytes()), state.getTranslog().totalOperations(), state.getTranslog().recoveredOperations(), percent(state.getTranslog().recoveredPercent()));
        final List<Table.Cell> cells = table.getRows().get(i);
        for (int j = 0; j < expectedValues.size(); j++) {
            assertThat(cells.get(j).value, equalTo(expectedValues.get(j)));
        }
    }
}
254028.8322131elasticsearch
/**
 * Randomized driver for terms-aggregation tests. Generates {@code numTerms}
 * keys via {@code valueFactory} with a random per-key doc count, indexes them
 * either single- or multi-valued, runs a terms aggregation with a random
 * bucket order / size / execution hint, and checks that the top {@code size}
 * buckets match the expected keys and counts. For the single-valued case it
 * additionally nests the terms aggregation under a filter aggregation and
 * verifies the filtered per-key counts.
 *
 * @param luceneFieldFactory builds the indexable fields for a key; the boolean
 *                           argument signals the multi-valued case
 */
private void termsAggregator(ValueType valueType, MappedFieldType fieldType, Function<Integer, T> valueFactory, Comparator<T> keyComparator, BiFunction<T, Boolean, List<IndexableField>> luceneFieldFactory) throws Exception {
    // counts: expected docs per key; filteredCounts: docs per key that also
    // carry the include=yes marker (only tracked in the single-valued case).
    final Map<T, Integer> counts = new HashMap<>();
    final Map<T, Integer> filteredCounts = new HashMap<>();
    int numTerms = scaledRandomIntBetween(8, 128);
    for (int i = 0; i < numTerms; i++) {
        int numDocs = scaledRandomIntBetween(2, 32);
        T key = valueFactory.apply(i);
        counts.put(key, numDocs);
        filteredCounts.put(key, 0);
    }
    try (Directory directory = newDirectory()) {
        boolean multiValued = randomBoolean();
        try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
            if (multiValued == false) {
                // One document per (key, occurrence); randomly tag documents
                // with include=yes for the filter sub-aggregation check below.
                for (Map.Entry<T, Integer> entry : counts.entrySet()) {
                    for (int i = 0; i < entry.getValue(); i++) {
                        Document document = new Document();
                        luceneFieldFactory.apply(entry.getKey(), false).forEach(document::add);
                        if (randomBoolean()) {
                            document.add(new StringField("include", "yes", Field.Store.NO));
                            filteredCounts.computeIfPresent(entry.getKey(), (key, integer) -> integer + 1);
                        }
                        indexWriter.addDocument(document);
                    }
                }
            } else {
                // Multi-valued: optionally pair two keys into the same docs.
                // entry1 is swapped to be the one with the larger count so
                // entry2's occurrences can all piggyback on entry1's documents.
                Iterator<Map.Entry<T, Integer>> iterator = counts.entrySet().iterator();
                while (iterator.hasNext()) {
                    Map.Entry<T, Integer> entry1 = iterator.next();
                    Map.Entry<T, Integer> entry2 = null;
                    if (randomBoolean() && iterator.hasNext()) {
                        entry2 = iterator.next();
                        if (entry1.getValue().compareTo(entry2.getValue()) < 0) {
                            Map.Entry<T, Integer> temp = entry1;
                            entry1 = entry2;
                            entry2 = temp;
                        }
                    }
                    for (int i = 0; i < entry1.getValue(); i++) {
                        Document document = new Document();
                        luceneFieldFactory.apply(entry1.getKey(), true).forEach(document::add);
                        if (entry2 != null && i < entry2.getValue()) {
                            luceneFieldFactory.apply(entry2.getKey(), true).forEach(document::add);
                        }
                        indexWriter.addDocument(document);
                    }
                }
            }
            try (DirectoryReader indexReader = maybeWrapReaderEs(indexWriter.getReader())) {
                // Build the expected bucket list, sorted with a comparator
                // that mirrors the randomly chosen BucketOrder (key order, or
                // count order with key as tiebreaker; reversed when descending).
                boolean order = randomBoolean();
                List<Map.Entry<T, Integer>> expectedBuckets = new ArrayList<>();
                expectedBuckets.addAll(counts.entrySet());
                BucketOrder bucketOrder;
                Comparator<Map.Entry<T, Integer>> comparator;
                if (randomBoolean()) {
                    bucketOrder = BucketOrder.key(order);
                    comparator = Comparator.comparing(Map.Entry::getKey, keyComparator);
                } else {
                    bucketOrder = BucketOrder.compound(BucketOrder.count(order), BucketOrder.key(order));
                    comparator = Comparator.comparing(Map.Entry::getValue);
                    comparator = comparator.thenComparing(Comparator.comparing(Map.Entry::getKey, keyComparator));
                }
                if (order == false) {
                    comparator = comparator.reversed();
                }
                expectedBuckets.sort(comparator);
                int size = randomIntBetween(1, counts.size());
                String executionHint = randomFrom(TermsAggregatorFactory.ExecutionMode.values()).toString();
                logger.info("bucket_order={} size={} execution_hint={}", bucketOrder, size, executionHint);
                AggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name").userValueTypeHint(valueType).executionHint(executionHint).size(size).shardSize(size).field("field").order(bucketOrder);
                Terms result = searchAndReduce(indexReader, new AggTestConfig(aggregationBuilder, fieldType).withSplitLeavesIntoSeperateAggregators(false));
                assertEquals(size, result.getBuckets().size());
                for (int i = 0; i < size; i++) {
                    Map.Entry<T, Integer> expected = expectedBuckets.get(i);
                    Terms.Bucket actual = result.getBuckets().get(i);
                    if (valueType == ValueType.IP) {
                        // Drops the leading '/' that InetAddress#toString
                        // prepends to the literal address.
                        assertEquals(String.valueOf(expected.getKey()).substring(1), actual.getKey());
                    } else {
                        assertEquals(expected.getKey(), actual.getKey());
                    }
                    assertEquals(expected.getValue().longValue(), actual.getDocCount());
                }
                if (multiValued == false) {
                    // Second pass: terms under a filter(include=yes); bucket
                    // count equals the number of keys with at least one tagged
                    // doc, and each bucket's doc count equals filteredCounts.
                    MappedFieldType filterFieldType = new KeywordFieldMapper.KeywordFieldType("include");
                    aggregationBuilder = new FilterAggregationBuilder("_name1", QueryBuilders.termQuery("include", "yes"));
                    aggregationBuilder.subAggregation(new TermsAggregationBuilder("_name2").userValueTypeHint(valueType).executionHint(executionHint).size(numTerms).collectMode(randomFrom(Aggregator.SubAggCollectionMode.values())).field("field"));
                    result = ((Filter) searchAndReduce(indexReader, new AggTestConfig(aggregationBuilder, fieldType, filterFieldType).withSplitLeavesIntoSeperateAggregators(false))).getAggregations().get("_name2");
                    int expectedFilteredCounts = 0;
                    for (Integer count : filteredCounts.values()) {
                        if (count > 0) {
                            expectedFilteredCounts++;
                        }
                    }
                    assertEquals(expectedFilteredCounts, result.getBuckets().size());
                    for (Terms.Bucket actual : result.getBuckets()) {
                        Integer expectedCount;
                        if (valueType == ValueType.IP) {
                            expectedCount = filteredCounts.get(InetAddresses.forString((String) actual.getKey()));
                        } else {
                            expectedCount = filteredCounts.get(actual.getKey());
                        }
                        assertEquals(expectedCount.longValue(), actual.getDocCount());
                    }
                }
            }
        }
    }
}
253623.991187elasticsearch
// Exercises RemoteClusterService#groupClusterIndices: index expressions are
// split into a local group plus one group per registered remote cluster,
// "-cluster:*" exclusions remove previously matched clusters, and malformed
// or unresolvable expressions are rejected.
public void testGroupClusterIndices() throws IOException {
    List<DiscoveryNode> discoveredNodes = new CopyOnWriteArrayList<>();
    try (MockTransportService cluster1Transport = startTransport("cluster_1_node", discoveredNodes, VersionInformation.CURRENT, TransportVersion.current());
        MockTransportService cluster2Transport = startTransport("cluster_2_node", discoveredNodes, VersionInformation.CURRENT, TransportVersion.current())) {
        DiscoveryNode seedNode1 = cluster1Transport.getLocalDiscoNode();
        DiscoveryNode seedNode2 = cluster2Transport.getLocalDiscoNode();
        discoveredNodes.add(seedNode1);
        discoveredNodes.add(seedNode2);
        Collections.shuffle(discoveredNodes, random());
        try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, VersionInformation.CURRENT, TransportVersion.current(), threadPool, null)) {
            localService.start();
            localService.acceptIncomingRequests();
            // Register both mock transports as remote clusters via seed settings.
            Settings.Builder remoteSettings = Settings.builder();
            remoteSettings.putList("cluster.remote.cluster_1.seeds", seedNode1.getAddress().toString());
            remoteSettings.putList("cluster.remote.cluster_2.seeds", seedNode2.getAddress().toString());
            try (RemoteClusterService service = new RemoteClusterService(remoteSettings.build(), localService)) {
                assertFalse(service.isCrossClusterSearchEnabled());
                service.initializeRemoteClusters();
                assertTrue(service.isCrossClusterSearchEnabled());
                assertTrue(service.isRemoteClusterRegistered("cluster_1"));
                assertTrue(service.isRemoteClusterRegistered("cluster_2"));
                assertFalse(service.isRemoteClusterRegistered("foo"));
                {
                    // Mixed local and remote expressions, including wildcards
                    // on the cluster name and a ':' inside an index name.
                    Map<String, List<String>> grouped = service.groupClusterIndices(service.getRemoteClusterNames(), new String[] { "cluster_1:bar", "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo" });
                    List<String> localGroup = grouped.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
                    assertNotNull(localGroup);
                    assertEquals("foo", localGroup.get(0));
                    assertEquals(2, grouped.size());
                    assertEquals(Arrays.asList("bar", "test", "baz", "boo"), grouped.get("cluster_1"));
                    assertEquals(Arrays.asList("foo:bar", "foo*", "baz", "boo"), grouped.get("cluster_2"));
                }
                // Expressions naming an unregistered cluster are rejected.
                expectThrows(NoSuchRemoteClusterException.class, () -> service.groupClusterIndices(service.getRemoteClusterNames(), new String[] { "foo:bar", "cluster_1:bar", "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo" }));
                expectThrows(NoSuchRemoteClusterException.class, () -> service.groupClusterIndices(service.getRemoteClusterNames(), new String[] { "cluster_1:bar", "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "does_not_exist:*" }));
                {
                    // "-cluster_1:*" removes cluster_1 from the matched set.
                    String[] expressions = shuffledList(List.of("cluster*:foo*", "foo", "-cluster_1:*", "*:boo")).toArray(new String[0]);
                    Map<String, List<String>> grouped = service.groupClusterIndices(service.getRemoteClusterNames(), expressions);
                    assertEquals(2, grouped.size());
                    List<String> localGroup = grouped.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
                    assertNotNull(localGroup);
                    assertEquals(1, localGroup.size());
                    assertEquals("foo", localGroup.get(0));
                    List<String> cluster2Indices = grouped.get("cluster_2");
                    assertNotNull(cluster2Indices);
                    assertEquals(2, cluster2Indices.size());
                    assertEquals(List.of("boo", "foo*"), cluster2Indices.stream().sorted().toList());
                }
                {
                    // Wildcard exclusion of the cluster name itself.
                    String[] expressions = shuffledList(List.of("*:*", "-clu*_1:*", "*:boo")).toArray(new String[0]);
                    Map<String, List<String>> grouped = service.groupClusterIndices(service.getRemoteClusterNames(), expressions);
                    assertEquals(1, grouped.size());
                    List<String> cluster2Indices = grouped.get("cluster_2");
                    assertNotNull(cluster2Indices);
                    assertEquals(2, cluster2Indices.size());
                    assertEquals(List.of("*", "boo"), cluster2Indices.stream().sorted().toList());
                }
                {
                    // Excluding every remote cluster leaves only the local group.
                    String[] expressions = shuffledList(List.of("cluster*:foo*", "cluster_2:*", "foo", "-cluster_1:*", "-c*:*")).toArray(new String[0]);
                    Map<String, List<String>> grouped = service.groupClusterIndices(service.getRemoteClusterNames(), expressions);
                    assertEquals(1, grouped.size());
                    List<String> localGroup = grouped.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
                    assertNotNull(localGroup);
                    assertEquals(1, localGroup.size());
                    assertEquals("foo", localGroup.get(0));
                }
                {
                    String[] expressions = shuffledList(List.of("cluster*:*", "foo", "-*:*")).toArray(new String[0]);
                    Map<String, List<String>> grouped = service.groupClusterIndices(service.getRemoteClusterNames(), expressions);
                    assertEquals(1, grouped.size());
                    List<String> localGroup = grouped.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
                    assertNotNull(localGroup);
                    assertEquals(1, localGroup.size());
                    assertEquals("foo", localGroup.get(0));
                }
                {
                    // Exclusions must use the '*' index wildcard.
                    IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.groupClusterIndices(service.getRemoteClusterNames(), new String[] { "cluster_1:bar", "-cluster_2:foo*", "cluster_1:test", "cluster_2:foo*", "foo" }));
                    assertThat(ex.getMessage(), equalTo("To exclude a cluster you must specify the '*' wildcard for the index expression, but found: [foo*]"));
                }
                {
                    // Excluding a cluster that was never included is an error.
                    IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.groupClusterIndices(service.getRemoteClusterNames(), new String[] { "-cluster_1:*", "cluster_2:foo*", "foo" }));
                    assertThat(ex.getMessage(), equalTo("Attempt to exclude cluster [cluster_1] failed as it is not included in the list of clusters to be included: [(local), cluster_2]. Input: [-cluster_1:*,cluster_2:foo*,foo]"));
                }
                {
                    IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.groupClusterIndices(service.getRemoteClusterNames(), new String[] { "-cluster_1:*" }));
                    assertThat(ex.getMessage(), equalTo("Attempt to exclude cluster [cluster_1] failed as it is not included in the list of clusters to be included: []. Input: [-cluster_1:*]"));
                }
                {
                    IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.groupClusterIndices(service.getRemoteClusterNames(), new String[] { "-*:*" }));
                    assertThat(ex.getMessage(), equalTo("Attempt to exclude clusters [cluster_1, cluster_2] failed as they are not included in the list of clusters to be included: []. Input: [-*:*]"));
                }
                {
                    // Exclusions that cancel out everything are rejected.
                    String[] expressions = shuffledList(List.of("cluster*:*", "*:foo", "-*:*")).toArray(new String[0]);
                    IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.groupClusterIndices(service.getRemoteClusterNames(), expressions));
                    assertThat(ex.getMessage(), containsString("The '-' exclusions in the index expression list excludes all indexes. Nothing to search."));
                }
            }
        }
    }
}
254674.789146elasticsearch
// Checks AutoscalingCalculateCapacityService's decider context: which nodes it
// considers (only those whose roles match the tier's role names) and the
// node/total memory and storage capacity it derives from ClusterInfo.
public void testContext() {
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build();
    ClusterInfo info = ClusterInfo.EMPTY;
    SortedSet<String> roleNames = randomRoles();
    // Data tiers need disk usage to compute capacity; several branches below
    // expect a null capacity when hasDataRole is true and disk info is absent.
    boolean hasDataRole = roleNames.stream().anyMatch(r -> DiscoveryNodeRole.getRoleFromRoleName(r).canContainData());
    AutoscalingCalculateCapacityService service = new AutoscalingCalculateCapacityService(Set.of(new FixedAutoscalingDeciderService()));
    SnapshotShardSizeInfo snapshotShardSizeInfo = new SnapshotShardSizeInfo(Map.of());
    // Empty cluster state: no nodes, zero capacity.
    AutoscalingDeciderContext context = service.createContext(roleNames, state, info, snapshotShardSizeInfo, n -> Optional.of(new AutoscalingNodeInfo(randomNonNegativeLong(), randomProcessors())), () -> {
    });
    assertSame(state, context.state());
    assertThat(context.nodes(), equalTo(Set.of()));
    assertThat(context.currentCapacity(), equalTo(AutoscalingCapacity.ZERO));
    assertThat(context.info(), sameInstance(info));
    assertThat(context.snapshotShardSizeInfo(), sameInstance(snapshotShardSizeInfo));
    Set<DiscoveryNodeRole> roles = roleNames.stream().map(DiscoveryNodeRole::getRoleFromRoleName).collect(Collectors.toSet());
    Set<DiscoveryNodeRole> otherRoles = mutateRoles(roleNames).stream().map(DiscoveryNodeRole::getRoleFromRoleName).collect(Collectors.toSet());
    final long memory = between(0, 1000);
    // Single node with matching roles, fixed memory, no ClusterInfo (null):
    // memory capacity is known, storage is zero — unless a data role makes the
    // whole capacity null for lack of disk usage.
    state = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("nodeId", buildNewFakeTransportAddress(), Map.of(), roles))).build();
    context = new AutoscalingCalculateCapacityService.DefaultAutoscalingDeciderContext(roleNames, state, info, null, n -> Optional.of(new AutoscalingNodeInfo(memory, randomProcessors())), () -> {
    });
    assertThat(context.nodes().size(), equalTo(1));
    assertThat(context.nodes(), equalTo(new HashSet<>(state.nodes().getAllNodes())));
    if (hasDataRole) {
        assertNull(context.currentCapacity());
    } else {
        assertThat(context.currentCapacity().node().memory(), equalTo(ByteSizeValue.ofBytes(memory)));
        assertThat(context.currentCapacity().total().memory(), equalTo(ByteSizeValue.ofBytes(memory)));
        assertThat(context.currentCapacity().node().storage(), equalTo(ByteSizeValue.ZERO));
        assertThat(context.currentCapacity().total().storage(), equalTo(ByteSizeValue.ZERO));
    }
    // Mixed cluster: some nodes use the tier's roles (tracked in expectedNodes
    // and in the sumTotal/maxTotal storage accumulators), the rest use other
    // roles and must be ignored by the context.
    Map<String, DiskUsage> leastUsages = new HashMap<>();
    Map<String, DiskUsage> mostUsages = new HashMap<>();
    DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
    Set<DiscoveryNode> expectedNodes = new HashSet<>();
    long sumTotal = 0;
    long maxTotal = 0;
    for (int i = 0; i < randomIntBetween(1, 5); ++i) {
        String nodeId = "nodeId" + i;
        boolean useOtherRoles = randomBoolean();
        DiscoveryNode node = DiscoveryNodeUtils.create(nodeId, buildNewFakeTransportAddress(), Map.of(), useOtherRoles ? otherRoles : roles);
        nodes.add(node);
        if (useOtherRoles == false) {
            long total = randomLongBetween(1, 1L << 40);
            DiskUsage diskUsage = new DiskUsage(nodeId, null, randomAlphaOfLength(5), total, randomLongBetween(0, total));
            leastUsages.put(nodeId, diskUsage);
            if (randomBoolean()) {
                // Same path/total/free either way — most-usage entry stays
                // consistent with the least-usage one for matching nodes.
                diskUsage = new DiskUsage(nodeId, null, diskUsage.path(), total, diskUsage.freeBytes());
            }
            mostUsages.put(nodeId, diskUsage);
            sumTotal += total;
            maxTotal = Math.max(total, maxTotal);
            expectedNodes.add(node);
        } else {
            // Ignored nodes get arbitrary, unrelated disk usage entries.
            long total1 = randomLongBetween(0, 1L << 40);
            leastUsages.put(nodeId, new DiskUsage(nodeId, null, randomAlphaOfLength(5), total1, randomLongBetween(0, total1)));
            long total2 = randomLongBetween(0, 1L << 40);
            mostUsages.put(nodeId, new DiskUsage(nodeId, null, randomAlphaOfLength(5), total2, randomLongBetween(0, total2)));
        }
    }
    state = ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).build();
    info = new ClusterInfo(leastUsages, mostUsages, Map.of(), Map.of(), Map.of(), Map.of());
    context = new AutoscalingCalculateCapacityService.DefaultAutoscalingDeciderContext(roleNames, state, info, null, n -> Optional.of(new AutoscalingNodeInfo(memory, randomProcessors())), () -> {
    });
    assertThat(context.nodes(), equalTo(expectedNodes));
    if (hasDataRole) {
        // With disk info present, data tiers report per-node max and summed
        // total storage over the matching nodes.
        assertThat(context.currentCapacity().node().storage(), equalTo(ByteSizeValue.ofBytes(maxTotal)));
        assertThat(context.currentCapacity().total().storage(), equalTo(ByteSizeValue.ofBytes(sumTotal)));
    } else {
        assertThat(context.currentCapacity().node().storage(), equalTo(ByteSizeValue.ZERO));
        assertThat(context.currentCapacity().total().storage(), equalTo(ByteSizeValue.ZERO));
    }
    // signum makes the expected per-node memory 0 when no node matched.
    assertThat(context.currentCapacity().node().memory(), equalTo(ByteSizeValue.ofBytes(memory * Integer.signum(expectedNodes.size()))));
    assertThat(context.currentCapacity().total().memory(), equalTo(ByteSizeValue.ofBytes(memory * expectedNodes.size())));
    if (expectedNodes.isEmpty() == false) {
        // Without node-info (AutoscalingNodesInfo.EMPTY) capacity is unknown.
        context = new AutoscalingCalculateCapacityService.DefaultAutoscalingDeciderContext(roleNames, state, info, null, AutoscalingNodesInfo.EMPTY, () -> {
        });
        assertThat(context.nodes(), equalTo(expectedNodes));
        assertThat(context.currentCapacity(), is(nullValue()));
        // Give one matching node a most-usage path that differs from its
        // least-usage path (i.e. multiple data paths): data-role capacity
        // becomes null again — presumably because storage is ambiguous across
        // paths; confirm against DefaultAutoscalingDeciderContext.
        String multiPathNodeId = randomFrom(expectedNodes).getId();
        DiskUsage original = mostUsages.get(multiPathNodeId);
        mostUsages.put(multiPathNodeId, new DiskUsage(multiPathNodeId, null, randomValueOtherThan(original.path(), () -> randomAlphaOfLength(5)), original.totalBytes(), original.freeBytes()));
        info = new ClusterInfo(leastUsages, mostUsages, Map.of(), Map.of(), Map.of(), Map.of());
        context = new AutoscalingCalculateCapacityService.DefaultAutoscalingDeciderContext(roleNames, state, info, null, n -> Optional.of(new AutoscalingNodeInfo(memory, randomProcessors())), () -> {
        });
        assertThat(context.nodes(), equalTo(expectedNodes));
        if (hasDataRole) {
            assertThat(context.currentCapacity(), is(nullValue()));
        } else {
            assertThat(context.currentCapacity().node().memory(), equalTo(ByteSizeValue.ofBytes(memory)));
            assertThat(context.currentCapacity().total().memory(), equalTo(ByteSizeValue.ofBytes(memory * expectedNodes.size())));
            assertThat(context.currentCapacity().node().storage(), equalTo(ByteSizeValue.ZERO));
            assertThat(context.currentCapacity().total().storage(), equalTo(ByteSizeValue.ZERO));
        }
    }
}
252969.572189elasticsearch
/**
 * Builds the parameterized test cases for the {@code ToUnsignedLong} conversion function:
 * one group of suppliers per source type (unsigned_long identity, boolean, datetime, string,
 * double, long, int), including out-of-range inputs that must evaluate to {@code null} with
 * the corresponding warning messages.
 */
public static Iterable<Object[]> parameters() {
    // Rendered form of the evaluator's single input channel; embedded in every expected evaluator name.
    String read = "Attribute[channel=0]";
    // Maps a source-type suffix (e.g. "Boolean") to the expected evaluator toString().
    Function<String, String> evaluatorName = s -> "ToUnsignedLongFrom" + s + "Evaluator[field=" + read + "]";
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // unsigned_long -> unsigned_long is the identity conversion over the full valid range.
    TestCaseSupplier.forUnaryUnsignedLong(suppliers, read, DataTypes.UNSIGNED_LONG, n -> n, BigInteger.ZERO, UNSIGNED_LONG_MAX, List.of());
    // booleans convert to 0/1.
    TestCaseSupplier.forUnaryBoolean(suppliers, evaluatorName.apply("Boolean"), DataTypes.UNSIGNED_LONG, b -> b ? BigInteger.ONE : BigInteger.ZERO, List.of());
    // datetimes convert via their epoch-millis long value (hence the "Long" evaluator name).
    TestCaseSupplier.forUnaryDatetime(suppliers, evaluatorName.apply("Long"), DataTypes.UNSIGNED_LONG, instant -> BigInteger.valueOf(instant.toEpochMilli()), List.of());
    // Arbitrary (non-numeric) strings fail to parse: result is null plus two warnings, the second
    // carrying the NumberFormatException message produced by the same parse attempt.
    TestCaseSupplier.forUnaryStrings(suppliers, evaluatorName.apply("String"), DataTypes.UNSIGNED_LONG, bytesRef -> null, bytesRef -> {
        Exception e = expectThrows(NumberFormatException.class, () -> new BigDecimal(bytesRef.utf8ToString()));
        return List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.NumberFormatException: " + e.getMessage());
    });
    // Doubles in [0, UNSIGNED_LONG_MAX] truncate toward zero via BigDecimal.
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.UNSIGNED_LONG, d -> BigDecimal.valueOf(d).toBigInteger(), 0d, UNSIGNED_LONG_MAX_AS_DOUBLE, List.of());
    // Negative doubles are out of range: null result plus range warning.
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.UNSIGNED_LONG, d -> null, Double.NEGATIVE_INFINITY, -1d, d -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + d + "] out of [unsigned_long] range"));
    // Doubles above the unsigned_long maximum are likewise out of range.
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.UNSIGNED_LONG, d -> null, UNSIGNED_LONG_MAX_AS_DOUBLE + 10e5, Double.POSITIVE_INFINITY, d -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + d + "] out of [unsigned_long] range"));
    // Non-negative longs convert directly; negative longs are out of range.
    TestCaseSupplier.forUnaryLong(suppliers, evaluatorName.apply("Long"), DataTypes.UNSIGNED_LONG, BigInteger::valueOf, 0L, Long.MAX_VALUE, List.of());
    TestCaseSupplier.forUnaryLong(suppliers, evaluatorName.apply("Long"), DataTypes.UNSIGNED_LONG, unused -> null, Long.MIN_VALUE, -1L, l -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + l + "] out of [unsigned_long] range"));
    // Same split for ints.
    TestCaseSupplier.forUnaryInt(suppliers, evaluatorName.apply("Int"), DataTypes.UNSIGNED_LONG, BigInteger::valueOf, 0, Integer.MAX_VALUE, List.of());
    TestCaseSupplier.forUnaryInt(suppliers, evaluatorName.apply("Int"), DataTypes.UNSIGNED_LONG, unused -> null, Integer.MIN_VALUE, -1, l -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + l + "] out of [unsigned_long] range"));
    // Strings rendering in-range unsigned_long values parse successfully.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.ulongCases(BigInteger.ZERO, UNSIGNED_LONG_MAX, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.UNSIGNED_LONG, bytesRef -> safeToUnsignedLong(((BytesRef) bytesRef).utf8ToString()), List.of());
    // Strings rendering in-range doubles also parse successfully.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.doubleCases(0, UNSIGNED_LONG_MAX_AS_DOUBLE, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.UNSIGNED_LONG, bytesRef -> safeToUnsignedLong(((BytesRef) bytesRef).utf8ToString()), List.of());
    // Strings rendering out-of-range doubles (negative, or above the max) yield null plus range warnings.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.doubleCases(Double.NEGATIVE_INFINITY, -1d, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.UNSIGNED_LONG, bytesRef -> null, bytesRef -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + ((BytesRef) bytesRef).utf8ToString() + "] out of [unsigned_long] range"));
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.doubleCases(UNSIGNED_LONG_MAX_AS_DOUBLE + 10e5, Double.POSITIVE_INFINITY, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.UNSIGNED_LONG, bytesRef -> null, bytesRef -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + ((BytesRef) bytesRef).utf8ToString() + "] out of [unsigned_long] range"));
    // Add null-propagation and type-error cases derived from the suppliers above.
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
}
253355.534191elasticsearch
/**
 * Builds the parameterized test cases for the {@code +} (Add) operator: numeric widening
 * combinations, unsigned_long addition, temporal-amount arithmetic (periods, durations,
 * datetime + period/duration), null propagation, multi-value rejection, and overflow cases.
 */
public static Iterable<Object[]> parameters() {
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // int/long/double pairs with automatic widening; operand ranges are shrunk ((MIN>>1)-1 .. (MAX>>1)-1)
    // so that the sum of two in-range values cannot overflow the result type.
    suppliers.addAll(TestCaseSupplier.forBinaryWithWidening(new TestCaseSupplier.NumericTypeTestConfigs<Number>(new TestCaseSupplier.NumericTypeTestConfig<>((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1, (l, r) -> l.intValue() + r.intValue(), "AddIntsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1, (l, r) -> l.longValue() + r.longValue(), "AddLongsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, (l, r) -> l.doubleValue() + r.doubleValue(), "AddDoublesEvaluator")), "lhs", "rhs", (lhs, rhs) -> List.of(), true));
    // unsigned_long + unsigned_long; operands capped at Long.MAX_VALUE so the BigInteger sum stays in range.
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("AddUnsignedLongsEvaluator", "lhs", "rhs", (l, r) -> (((BigInteger) l).add((BigInteger) r)), DataTypes.UNSIGNED_LONG, TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE), true), TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE), true), List.of(), true));
    // period + period and duration + duration fold to literals at plan time.
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting((lhs, rhs) -> ((Period) lhs).plus((Period) rhs), EsqlDataTypes.DATE_PERIOD, TestCaseSupplier.datePeriodCases(), TestCaseSupplier.datePeriodCases(), startsWith("LiteralsEvaluator[lit="), (lhs, rhs) -> List.of(), true));
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting((lhs, rhs) -> ((Duration) lhs).plus((Duration) rhs), EsqlDataTypes.TIME_DURATION, TestCaseSupplier.timeDurationCases(), TestCaseSupplier.timeDurationCases(), startsWith("LiteralsEvaluator[lit="), (lhs, rhs) -> List.of(), true));
    // datetime + temporal amount: overflow yields a null result...
    BinaryOperator<Object> result = (lhs, rhs) -> {
        try {
            return addDatesAndTemporalAmount(lhs, rhs);
        } catch (ArithmeticException e) {
            return null;
        }
    };
    // ...and two warnings; re-run the same addition to decide whether this case overflows.
    BiFunction<TestCaseSupplier.TypedData, TestCaseSupplier.TypedData, List<String>> warnings = (lhs, rhs) -> {
        try {
            addDatesAndTemporalAmount(lhs.data(), rhs.data());
            return List.of();
        } catch (ArithmeticException e) {
            return List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.ArithmeticException: long overflow");
        }
    };
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting(result, DataTypes.DATETIME, TestCaseSupplier.dateCases(), TestCaseSupplier.datePeriodCases(), startsWith("AddDatetimesEvaluator[datetime=Attribute[channel=0], temporalAmount="), warnings, true));
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting(result, DataTypes.DATETIME, TestCaseSupplier.dateCases(), TestCaseSupplier.timeDurationCases(), startsWith("AddDatetimesEvaluator[datetime=Attribute[channel=0], temporalAmount="), warnings, true));
    // datetime + null (and null + datetime) fold to a null literal.
    suppliers.addAll(TestCaseSupplier.dateCases().stream().<TestCaseSupplier>mapMulti((tds, consumer) -> {
        consumer.accept(new TestCaseSupplier(List.of(DataTypes.DATETIME, DataTypes.NULL), () -> new TestCaseSupplier.TestCase(List.of(tds.get(), TestCaseSupplier.TypedData.NULL), "LiteralsEvaluator[lit=null]", DataTypes.DATETIME, nullValue())));
        consumer.accept(new TestCaseSupplier(List.of(DataTypes.NULL, DataTypes.DATETIME), () -> new TestCaseSupplier.TestCase(List.of(TestCaseSupplier.TypedData.NULL, tds.get()), "LiteralsEvaluator[lit=null]", DataTypes.DATETIME, nullValue())));
    }).toList());
    // Derive null-propagation and unsupported-type-error variants of everything registered so far.
    suppliers = anyNullIsNull(suppliers, (nullPosition, nullType, original) -> original.expectedType(), (nullPosition, nullData, original) -> nullData.isForceLiteral() ? equalTo("LiteralsEvaluator[lit=null]") : original);
    suppliers = errorsForCasesWithoutExamples(suppliers, AddTests::addErrorMessageString);
    // Multi-valued lhs is rejected: null result plus single-value warnings.
    suppliers.addAll(List.of(new TestCaseSupplier("MV", () -> {
        int rhs = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1);
        int lhs = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1);
        int lhs2 = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1);
        return new TestCaseSupplier.TestCase(List.of(new TestCaseSupplier.TypedData(List.of(lhs, lhs2), DataTypes.INTEGER, "lhs"), new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs")), "AddIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.INTEGER, is(nullValue())).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.").withWarning("Line -1:-1: java.lang.IllegalArgumentException: single-value function encountered multi-value");
    })));
    // Explicit overflow cases in both directions for each fixed-width numeric type.
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.INTEGER, () -> randomIntBetween(1, Integer.MAX_VALUE), () -> Integer.MAX_VALUE, "AddIntsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.INTEGER, () -> randomIntBetween(Integer.MIN_VALUE, -1), () -> Integer.MIN_VALUE, "AddIntsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.LONG, () -> randomLongBetween(1L, Long.MAX_VALUE), () -> Long.MAX_VALUE, "AddLongsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.LONG, () -> randomLongBetween(Long.MIN_VALUE, -1L), () -> Long.MIN_VALUE, "AddLongsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.UNSIGNED_LONG, () -> asLongUnsigned(randomBigInteger()), () -> asLongUnsigned(UNSIGNED_LONG_MAX), "AddUnsignedLongsEvaluator"));
    return parameterSuppliersFromTypedData(suppliers);
}
254144.51177elasticsearch
/**
 * Stores one regression and one classification trained model, then runs inference
 * requests against both and checks the returned values, class labels, and top-class
 * behavior (ordering, probabilities, and the {@code numTopClasses} limit).
 *
 * @throws Exception if storing a model or executing an inference request fails
 */
public void testInferModels() throws Exception {
    String modelId1 = "test-load-models-regression";
    String modelId2 = "test-load-models-classification";
    Map<String, String> oneHotEncoding = new HashMap<>();
    oneHotEncoding.put("cat", "animal_cat");
    oneHotEncoding.put("dog", "animal_dog");
    // NOTE: config1 holds the classification model (modelId2) and config2 the regression
    // model (modelId1) — the numbering is crossed on purpose; only the store order matters.
    TrainedModelConfig config1 = buildTrainedModelConfigBuilder(modelId2).setInput(new TrainedModelInput(Arrays.asList("field.foo", "field.bar", "other.categorical"))).setParsedDefinition(new TrainedModelDefinition.Builder().setPreProcessors(Arrays.asList(new OneHotEncoding("other.categorical", oneHotEncoding, false))).setTrainedModel(buildClassification(true))).setVersion(MlConfigVersion.CURRENT).setLicenseLevel(License.OperationMode.PLATINUM.description()).setCreateTime(Instant.now()).setEstimatedOperations(0).setModelSize(0).build();
    TrainedModelConfig config2 = buildTrainedModelConfigBuilder(modelId1).setInput(new TrainedModelInput(Arrays.asList("field.foo", "field.bar", "other.categorical"))).setParsedDefinition(new TrainedModelDefinition.Builder().setPreProcessors(Arrays.asList(new OneHotEncoding("other.categorical", oneHotEncoding, false))).setTrainedModel(buildRegression())).setVersion(MlConfigVersion.CURRENT).setEstimatedOperations(0).setModelSize(0).setCreateTime(Instant.now()).build();
    AtomicReference<Boolean> putConfigHolder = new AtomicReference<>();
    AtomicReference<Exception> exceptionHolder = new AtomicReference<>();
    // Persist both models and verify each store succeeded without error.
    blockingCall(listener -> trainedModelProvider.storeTrainedModel(config1, listener), putConfigHolder, exceptionHolder);
    assertThat(putConfigHolder.get(), is(true));
    assertThat(exceptionHolder.get(), is(nullValue()));
    blockingCall(listener -> trainedModelProvider.storeTrainedModel(config2, listener), putConfigHolder, exceptionHolder);
    assertThat(putConfigHolder.get(), is(true));
    assertThat(exceptionHolder.get(), is(nullValue()));
    // Two batches of ingest-style documents; each doc carries the numeric features
    // "field.foo"/"field.bar" and the categorical feature "other.categorical".
    List<Map<String, Object>> toInfer = new ArrayList<>();
    toInfer.add(inferenceDoc(1.0, 0.5, "dog"));
    toInfer.add(inferenceDoc(0.9, 1.5, "cat"));
    List<Map<String, Object>> toInfer2 = new ArrayList<>();
    toInfer2.add(inferenceDoc(0.0, 0.01, "dog"));
    toInfer2.add(inferenceDoc(1.0, 0.0, "cat"));
    // Regression model: check the predicted numeric values for both batches.
    InferModelAction.Request request = InferModelAction.Request.forIngestDocs(modelId1, toInfer, RegressionConfigUpdate.EMPTY_PARAMS, true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    InferModelAction.Response response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    assertThat(response.getInferenceResults().stream().map(i -> ((SingleValueInferenceResults) i).value()).collect(Collectors.toList()), contains(1.3, 1.25));
    request = InferModelAction.Request.forIngestDocs(modelId1, toInfer2, RegressionConfigUpdate.EMPTY_PARAMS, true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    assertThat(response.getInferenceResults().stream().map(i -> ((SingleValueInferenceResults) i).value()).collect(Collectors.toList()), contains(1.65, 1.55));
    // Classification model: check the predicted class labels.
    request = InferModelAction.Request.forIngestDocs(modelId2, toInfer, ClassificationConfigUpdate.EMPTY_PARAMS, true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    assertThat(response.getInferenceResults().stream().map(i -> ((SingleValueInferenceResults) i).valueAsString()).collect(Collectors.toList()), contains("no", "yes"));
    // With numTopClasses=2, both classes come back ordered by descending probability.
    request = InferModelAction.Request.forIngestDocs(modelId2, toInfer, new ClassificationConfigUpdate(2, null, null, null, null), true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    ClassificationInferenceResults classificationInferenceResults = (ClassificationInferenceResults) response.getInferenceResults().get(0);
    assertThat(classificationInferenceResults.getTopClasses().get(0).getClassification(), equalTo("no"));
    assertThat(classificationInferenceResults.getTopClasses().get(1).getClassification(), equalTo("yes"));
    assertThat(classificationInferenceResults.getTopClasses().get(0).getProbability(), greaterThan(classificationInferenceResults.getTopClasses().get(1).getProbability()));
    classificationInferenceResults = (ClassificationInferenceResults) response.getInferenceResults().get(1);
    assertThat(classificationInferenceResults.getTopClasses().get(0).getClassification(), equalTo("yes"));
    assertThat(classificationInferenceResults.getTopClasses().get(1).getClassification(), equalTo("no"));
    assertThat(classificationInferenceResults.getTopClasses().get(0).getProbability(), greaterThan(classificationInferenceResults.getTopClasses().get(1).getProbability()));
    // With numTopClasses=1, only the single most likely class is returned.
    request = InferModelAction.Request.forIngestDocs(modelId2, toInfer2, new ClassificationConfigUpdate(1, null, null, null, null), true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    classificationInferenceResults = (ClassificationInferenceResults) response.getInferenceResults().get(0);
    assertThat(classificationInferenceResults.getTopClasses(), hasSize(1));
    assertThat(classificationInferenceResults.getTopClasses().get(0).getClassification(), equalTo("yes"));
}

/**
 * Builds one ingest-style inference document of the shape
 * {@code {"field": {"foo": <foo>, "bar": <bar>}, "other": {"categorical": <categorical>}}}.
 * Replaces the previous double-brace initialization, which created anonymous
 * {@link HashMap} subclasses retaining a reference to the enclosing test instance.
 */
private static Map<String, Object> inferenceDoc(double foo, double bar, String categorical) {
    Map<String, Object> numericFields = new HashMap<>();
    numericFields.put("foo", foo);
    numericFields.put("bar", bar);
    Map<String, Object> other = new HashMap<>();
    other.put("categorical", categorical);
    Map<String, Object> doc = new HashMap<>();
    doc.put("field", numericFields);
    doc.put("other", other);
    return doc;
}
256018.537146elasticsearch
/**
 * Registers the plugin's transport action handlers. Usage/info actions are always
 * registered; everything else is gated on the plugin being enabled, and the bulk of the
 * handlers are further gated on which ML features (anomaly detection, data frame
 * analytics, NLP) the installed extension enables.
 *
 * @return the list of action handlers to register with the node
 */
public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
    List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actionHandlers = new ArrayList<>();
    // Usage/info reporting is available even when ML is disabled.
    actionHandlers.add(new ActionHandler<>(XPackUsageFeatureAction.MACHINE_LEARNING, MachineLearningUsageTransportAction.class));
    actionHandlers.add(new ActionHandler<>(XPackInfoFeatureAction.MACHINE_LEARNING, MachineLearningInfoTransportAction.class));
    if (false == enabled) {
        return actionHandlers;
    }
    // Core actions registered whenever the plugin is enabled, regardless of feature flags.
    actionHandlers.add(new ActionHandler<>(AuditMlNotificationAction.INSTANCE, TransportAuditMlNotificationAction.class));
    actionHandlers.add(new ActionHandler<>(MlInfoAction.INSTANCE, TransportMlInfoAction.class));
    actionHandlers.add(new ActionHandler<>(MlMemoryAction.INSTANCE, TransportMlMemoryAction.class));
    actionHandlers.add(new ActionHandler<>(SetUpgradeModeAction.INSTANCE, TransportSetUpgradeModeAction.class));
    actionHandlers.add(new ActionHandler<>(SetResetModeAction.INSTANCE, TransportSetResetModeAction.class));
    actionHandlers.add(new ActionHandler<>(TrainedModelCacheInfoAction.INSTANCE, TransportTrainedModelCacheInfoAction.class));
    actionHandlers.add(new ActionHandler<>(GetMlAutoscalingStats.INSTANCE, TransportGetMlAutoscalingStats.class));
    // Anomaly detection: jobs, filters, results, model snapshots, datafeeds, calendars.
    if (machineLearningExtension.get().isAnomalyDetectionEnabled()) {
        actionHandlers.add(new ActionHandler<>(GetJobsAction.INSTANCE, TransportGetJobsAction.class));
        actionHandlers.add(new ActionHandler<>(GetJobsStatsAction.INSTANCE, TransportGetJobsStatsAction.class));
        actionHandlers.add(new ActionHandler<>(PutJobAction.INSTANCE, TransportPutJobAction.class));
        actionHandlers.add(new ActionHandler<>(UpdateJobAction.INSTANCE, TransportUpdateJobAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteJobAction.INSTANCE, TransportDeleteJobAction.class));
        actionHandlers.add(new ActionHandler<>(OpenJobAction.INSTANCE, TransportOpenJobAction.class));
        actionHandlers.add(new ActionHandler<>(GetFiltersAction.INSTANCE, TransportGetFiltersAction.class));
        actionHandlers.add(new ActionHandler<>(PutFilterAction.INSTANCE, TransportPutFilterAction.class));
        actionHandlers.add(new ActionHandler<>(UpdateFilterAction.INSTANCE, TransportUpdateFilterAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteFilterAction.INSTANCE, TransportDeleteFilterAction.class));
        actionHandlers.add(new ActionHandler<>(KillProcessAction.INSTANCE, TransportKillProcessAction.class));
        actionHandlers.add(new ActionHandler<>(GetBucketsAction.INSTANCE, TransportGetBucketsAction.class));
        actionHandlers.add(new ActionHandler<>(GetInfluencersAction.INSTANCE, TransportGetInfluencersAction.class));
        actionHandlers.add(new ActionHandler<>(GetOverallBucketsAction.INSTANCE, TransportGetOverallBucketsAction.class));
        actionHandlers.add(new ActionHandler<>(GetRecordsAction.INSTANCE, TransportGetRecordsAction.class));
        actionHandlers.add(new ActionHandler<>(PostDataAction.INSTANCE, TransportPostDataAction.class));
        actionHandlers.add(new ActionHandler<>(CloseJobAction.INSTANCE, TransportCloseJobAction.class));
        actionHandlers.add(new ActionHandler<>(FinalizeJobExecutionAction.INSTANCE, TransportFinalizeJobExecutionAction.class));
        actionHandlers.add(new ActionHandler<>(FlushJobAction.INSTANCE, TransportFlushJobAction.class));
        actionHandlers.add(new ActionHandler<>(ResetJobAction.INSTANCE, TransportResetJobAction.class));
        actionHandlers.add(new ActionHandler<>(ValidateDetectorAction.INSTANCE, TransportValidateDetectorAction.class));
        actionHandlers.add(new ActionHandler<>(ValidateJobConfigAction.INSTANCE, TransportValidateJobConfigAction.class));
        actionHandlers.add(new ActionHandler<>(EstimateModelMemoryAction.INSTANCE, TransportEstimateModelMemoryAction.class));
        actionHandlers.add(new ActionHandler<>(GetCategoriesAction.INSTANCE, TransportGetCategoriesAction.class));
        actionHandlers.add(new ActionHandler<>(GetModelSnapshotsAction.INSTANCE, TransportGetModelSnapshotsAction.class));
        actionHandlers.add(new ActionHandler<>(RevertModelSnapshotAction.INSTANCE, TransportRevertModelSnapshotAction.class));
        actionHandlers.add(new ActionHandler<>(UpdateModelSnapshotAction.INSTANCE, TransportUpdateModelSnapshotAction.class));
        actionHandlers.add(new ActionHandler<>(GetDatafeedsAction.INSTANCE, TransportGetDatafeedsAction.class));
        actionHandlers.add(new ActionHandler<>(GetDatafeedsStatsAction.INSTANCE, TransportGetDatafeedsStatsAction.class));
        actionHandlers.add(new ActionHandler<>(PutDatafeedAction.INSTANCE, TransportPutDatafeedAction.class));
        actionHandlers.add(new ActionHandler<>(UpdateDatafeedAction.INSTANCE, TransportUpdateDatafeedAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteDatafeedAction.INSTANCE, TransportDeleteDatafeedAction.class));
        actionHandlers.add(new ActionHandler<>(PreviewDatafeedAction.INSTANCE, TransportPreviewDatafeedAction.class));
        actionHandlers.add(new ActionHandler<>(StartDatafeedAction.INSTANCE, TransportStartDatafeedAction.class));
        actionHandlers.add(new ActionHandler<>(StopDatafeedAction.INSTANCE, TransportStopDatafeedAction.class));
        actionHandlers.add(new ActionHandler<>(IsolateDatafeedAction.INSTANCE, TransportIsolateDatafeedAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteModelSnapshotAction.INSTANCE, TransportDeleteModelSnapshotAction.class));
        actionHandlers.add(new ActionHandler<>(UpdateProcessAction.INSTANCE, TransportUpdateProcessAction.class));
        actionHandlers.add(new ActionHandler<>(ForecastJobAction.INSTANCE, TransportForecastJobAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteForecastAction.INSTANCE, TransportDeleteForecastAction.class));
        actionHandlers.add(new ActionHandler<>(GetCalendarsAction.INSTANCE, TransportGetCalendarsAction.class));
        actionHandlers.add(new ActionHandler<>(PutCalendarAction.INSTANCE, TransportPutCalendarAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteCalendarAction.INSTANCE, TransportDeleteCalendarAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteCalendarEventAction.INSTANCE, TransportDeleteCalendarEventAction.class));
        actionHandlers.add(new ActionHandler<>(UpdateCalendarJobAction.INSTANCE, TransportUpdateCalendarJobAction.class));
        actionHandlers.add(new ActionHandler<>(GetCalendarEventsAction.INSTANCE, TransportGetCalendarEventsAction.class));
        actionHandlers.add(new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class));
        actionHandlers.add(new ActionHandler<>(PersistJobAction.INSTANCE, TransportPersistJobAction.class));
        actionHandlers.add(new ActionHandler<>(UpgradeJobModelSnapshotAction.INSTANCE, TransportUpgradeJobModelSnapshotAction.class));
        actionHandlers.add(new ActionHandler<>(CancelJobModelSnapshotUpgradeAction.INSTANCE, TransportCancelJobModelSnapshotUpgradeAction.class));
        actionHandlers.add(new ActionHandler<>(GetJobModelSnapshotsUpgradeStatsAction.INSTANCE, TransportGetJobModelSnapshotsUpgradeStatsAction.class));
        actionHandlers.add(new ActionHandler<>(GetDatafeedRunningStateAction.INSTANCE, TransportGetDatafeedRunningStateAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteExpiredDataAction.INSTANCE, TransportDeleteExpiredDataAction.class));
    }
    // Trained-model actions shared by data frame analytics and NLP.
    if (machineLearningExtension.get().isDataFrameAnalyticsEnabled() || machineLearningExtension.get().isNlpEnabled()) {
        actionHandlers.add(new ActionHandler<>(GetTrainedModelsAction.INSTANCE, TransportGetTrainedModelsAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteTrainedModelAction.INSTANCE, TransportDeleteTrainedModelAction.class));
        actionHandlers.add(new ActionHandler<>(GetTrainedModelsStatsAction.INSTANCE, TransportGetTrainedModelsStatsAction.class));
        actionHandlers.add(new ActionHandler<>(PutTrainedModelAction.INSTANCE, TransportPutTrainedModelAction.class));
        actionHandlers.add(new ActionHandler<>(PutTrainedModelAliasAction.INSTANCE, TransportPutTrainedModelAliasAction.class));
        actionHandlers.add(new ActionHandler<>(DeleteTrainedModelAliasAction.INSTANCE, TransportDeleteTrainedModelAliasAction.class));
        actionHandlers.add(new ActionHandler<>(PutTrainedModelDefinitionPartAction.INSTANCE, TransportPutTrainedModelDefinitionPartAction.class));
        actionHandlers.add(new ActionHandler<>(FlushTrainedModelCacheAction.INSTANCE, TransportFlushTrainedModelCacheAction.class));
        actionHandlers.add(new ActionHandler<>(InferModelAction.INSTANCE, TransportInternalInferModelAction.class));
        actionHandlers.add(new ActionHandler<>(InferModelAction.EXTERNAL_INSTANCE, TransportExternalInferModelAction.class));
        actionHandlers.add(new ActionHandler<>(GetDeploymentStatsAction.INSTANCE, TransportGetDeploymentStatsAction.class));
        // Data frame analytics job lifecycle and evaluation.
        if (machineLearningExtension.get().isDataFrameAnalyticsEnabled()) {
            actionHandlers.add(new ActionHandler<>(GetDataFrameAnalyticsAction.INSTANCE, TransportGetDataFrameAnalyticsAction.class));
            actionHandlers.add(new ActionHandler<>(GetDataFrameAnalyticsStatsAction.INSTANCE, TransportGetDataFrameAnalyticsStatsAction.class));
            actionHandlers.add(new ActionHandler<>(PutDataFrameAnalyticsAction.INSTANCE, TransportPutDataFrameAnalyticsAction.class));
            actionHandlers.add(new ActionHandler<>(UpdateDataFrameAnalyticsAction.INSTANCE, TransportUpdateDataFrameAnalyticsAction.class));
            actionHandlers.add(new ActionHandler<>(DeleteDataFrameAnalyticsAction.INSTANCE, TransportDeleteDataFrameAnalyticsAction.class));
            actionHandlers.add(new ActionHandler<>(StartDataFrameAnalyticsAction.INSTANCE, TransportStartDataFrameAnalyticsAction.class));
            actionHandlers.add(new ActionHandler<>(StopDataFrameAnalyticsAction.INSTANCE, TransportStopDataFrameAnalyticsAction.class));
            actionHandlers.add(new ActionHandler<>(EvaluateDataFrameAction.INSTANCE, TransportEvaluateDataFrameAction.class));
            actionHandlers.add(new ActionHandler<>(ExplainDataFrameAnalyticsAction.INSTANCE, TransportExplainDataFrameAnalyticsAction.class));
            actionHandlers.add(new ActionHandler<>(PreviewDataFrameAnalyticsAction.INSTANCE, TransportPreviewDataFrameAnalyticsAction.class));
        }
        // NLP: trained model deployment lifecycle, vocabulary, and assignment management.
        if (machineLearningExtension.get().isNlpEnabled()) {
            actionHandlers.add(new ActionHandler<>(StartTrainedModelDeploymentAction.INSTANCE, TransportStartTrainedModelDeploymentAction.class));
            actionHandlers.add(new ActionHandler<>(StopTrainedModelDeploymentAction.INSTANCE, TransportStopTrainedModelDeploymentAction.class));
            actionHandlers.add(new ActionHandler<>(InferTrainedModelDeploymentAction.INSTANCE, TransportInferTrainedModelDeploymentAction.class));
            actionHandlers.add(new ActionHandler<>(UpdateTrainedModelDeploymentAction.INSTANCE, TransportUpdateTrainedModelDeploymentAction.class));
            actionHandlers.add(new ActionHandler<>(PutTrainedModelVocabularyAction.INSTANCE, TransportPutTrainedModelVocabularyAction.class));
            actionHandlers.add(new ActionHandler<>(ClearDeploymentCacheAction.INSTANCE, TransportClearDeploymentCacheAction.class));
            actionHandlers.add(new ActionHandler<>(CreateTrainedModelAssignmentAction.INSTANCE, TransportCreateTrainedModelAssignmentAction.class));
            actionHandlers.add(new ActionHandler<>(DeleteTrainedModelAssignmentAction.INSTANCE, TransportDeleteTrainedModelAssignmentAction.class));
            actionHandlers.add(new ActionHandler<>(UpdateTrainedModelAssignmentRoutingInfoAction.INSTANCE, TransportUpdateTrainedModelAssignmentStateAction.class));
            actionHandlers.add(new ActionHandler<>(CoordinatedInferenceAction.INSTANCE, TransportCoordinatedInferenceAction.class));
        }
    }
    return actionHandlers;
}
253979.674690elasticsearch
/**
 * Renders anomaly detection job stats as a cat API table, one row per job.
 * <p>
 * The cell order must match the header columns produced by
 * {@code getTableWithHeader(request)}. Sections that may be absent
 * (model size stats, forecast stats, node, timing stats) emit {@code null}
 * cells instead of being skipped, so column alignment is preserved.
 *
 * @param request  the incoming cat REST request (drives the table header)
 * @param jobStats response whose results populate one row per job
 * @return the populated table
 */
private Table buildTable(RestRequest request, Response jobStats) {
    Table table = getTableWithHeader(request);
    jobStats.getResponse().results().forEach(job -> {
        table.startRow();
        // Job identity and state columns.
        table.addCell(job.getJobId());
        table.addCell(job.getState().value());
        table.addCell(job.getOpenTime());
        table.addCell(job.getAssignmentExplanation());
        // Data counts are always present on a job stats result.
        DataCounts dataCounts = job.getDataCounts();
        table.addCell(dataCounts.getProcessedRecordCount());
        table.addCell(dataCounts.getProcessedFieldCount());
        table.addCell(ByteSizeValue.ofBytes(dataCounts.getInputBytes()));
        table.addCell(dataCounts.getInputRecordCount());
        table.addCell(dataCounts.getInputFieldCount());
        table.addCell(dataCounts.getInvalidDateCount());
        table.addCell(dataCounts.getMissingFieldCount());
        table.addCell(dataCounts.getOutOfOrderTimeStampCount());
        table.addCell(dataCounts.getEmptyBucketCount());
        table.addCell(dataCounts.getSparseBucketCount());
        table.addCell(dataCounts.getBucketCount());
        table.addCell(dataCounts.getEarliestRecordTimeStamp());
        table.addCell(dataCounts.getLatestRecordTimeStamp());
        table.addCell(dataCounts.getLastDataTimeStamp());
        table.addCell(dataCounts.getLatestEmptyBucketTimeStamp());
        table.addCell(dataCounts.getLatestSparseBucketTimeStamp());
        // Model size stats may be null; individual byte limits may also be
        // null independently of the parent object, hence the double checks.
        ModelSizeStats modelSizeStats = job.getModelSizeStats();
        table.addCell(modelSizeStats == null ? null : ByteSizeValue.ofBytes(modelSizeStats.getModelBytes()));
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getMemoryStatus().toString());
        table.addCell(modelSizeStats == null || modelSizeStats.getModelBytesExceeded() == null ? null : ByteSizeValue.ofBytes(modelSizeStats.getModelBytesExceeded()));
        table.addCell(modelSizeStats == null || modelSizeStats.getModelBytesMemoryLimit() == null ? null : ByteSizeValue.ofBytes(modelSizeStats.getModelBytesMemoryLimit()));
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getTotalByFieldCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getTotalOverFieldCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getTotalPartitionFieldCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getBucketAllocationFailuresCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getCategorizationStatus().toString());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getCategorizedDocCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getTotalCategoryCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getFrequentCategoryCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getRareCategoryCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getDeadCategoryCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getFailedCategoryCount());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getLogTime());
        table.addCell(modelSizeStats == null ? null : modelSizeStats.getTimestamp());
        // Forecast sub-stats (memory/record/runtime) are only emitted when at
        // least one forecast exists (total > 0); the raw total column only
        // requires forecastStats itself to be non-null.
        ForecastStats forecastStats = job.getForecastStats();
        boolean missingForecastStats = forecastStats == null || forecastStats.getTotal() <= 0L;
        table.addCell(forecastStats == null ? null : forecastStats.getTotal());
        table.addCell(missingForecastStats ? null : ByteSizeValue.ofBytes((long) forecastStats.getMemoryStats().getMin()));
        table.addCell(missingForecastStats ? null : ByteSizeValue.ofBytes((long) forecastStats.getMemoryStats().getMax()));
        table.addCell(missingForecastStats ? null : ByteSizeValue.ofBytes(Math.round(forecastStats.getMemoryStats().getAvg())));
        table.addCell(missingForecastStats ? null : ByteSizeValue.ofBytes((long) forecastStats.getMemoryStats().getTotal()));
        table.addCell(missingForecastStats ? null : forecastStats.getRecordStats().getMin());
        table.addCell(missingForecastStats ? null : forecastStats.getRecordStats().getMax());
        table.addCell(missingForecastStats ? null : forecastStats.getRecordStats().getAvg());
        table.addCell(missingForecastStats ? null : forecastStats.getRecordStats().getTotal());
        table.addCell(missingForecastStats ? null : TimeValue.timeValueMillis((long) forecastStats.getRuntimeStats().getMin()));
        table.addCell(missingForecastStats ? null : TimeValue.timeValueMillis((long) forecastStats.getRuntimeStats().getMax()));
        table.addCell(missingForecastStats ? null : forecastStats.getRuntimeStats().getAvg());
        table.addCell(missingForecastStats ? null : TimeValue.timeValueMillis((long) forecastStats.getRuntimeStats().getTotal()));
        // Node the job is currently assigned to, if any.
        DiscoveryNode node = job.getNode();
        table.addCell(node == null ? null : node.getId());
        table.addCell(node == null ? null : node.getName());
        table.addCell(node == null ? null : node.getEphemeralId());
        table.addCell(node == null ? null : node.getAddress().toString());
        // Bucket processing timing stats; min/max may be null independently.
        TimingStats timingStats = job.getTimingStats();
        table.addCell(timingStats == null ? null : timingStats.getBucketCount());
        table.addCell(timingStats == null ? null : TimeValue.timeValueMillis((long) timingStats.getTotalBucketProcessingTimeMs()));
        table.addCell(timingStats == null || timingStats.getMinBucketProcessingTimeMs() == null ? null : TimeValue.timeValueMillis(timingStats.getMinBucketProcessingTimeMs().longValue()));
        table.addCell(timingStats == null || timingStats.getMaxBucketProcessingTimeMs() == null ? null : TimeValue.timeValueMillis(timingStats.getMaxBucketProcessingTimeMs().longValue()));
        table.addCell(timingStats == null ? null : timingStats.getExponentialAvgBucketProcessingTimeMs());
        table.addCell(timingStats == null ? null : timingStats.getExponentialAvgBucketProcessingTimePerHourMs());
        table.endRow();
    });
    return table;
}
253620.2717149elasticsearch
/**
 * Asserts that the frequent item sets returned by the aggregation match the
 * expected sets after re-applying the same filtering the aggregation performs.
 * <p>
 * The expected list is first re-derived: the excluded string/int values are
 * removed from every set, sets below {@code minSupport} or with fewer than
 * {@code minimumSetSize} values are dropped, the remainder is sorted by doc
 * count (desc), then by field count (desc), then by a stable string form,
 * duplicates are collapsed via a hash of the field entries, and the list is
 * truncated to {@code size} while keeping trailing sets that tie with the
 * set at the cut position.
 *
 * @param expected       unfiltered expected item sets
 * @param actual         item sets produced by the aggregation under test
 * @param minSupport     minimum support an expected set must have to be kept
 * @param minimumSetSize minimum total number of values an expected set must contain
 * @param size           requested number of result sets
 * @param stringExclude  string value filtered out of every expected set
 * @param intExclude     integer value filtered out of every expected set
 */
private void assertResults(List<FrequentItemSet> expected, FrequentItemSet[] actual, double minSupport, int minimumSetSize, int size, String stringExclude, Integer intExclude) {
    // Re-derive the expected result: strip excluded values, drop sets that fall
    // below the support/size thresholds, and sort with the aggregation's order.
    List<FrequentItemSet> filteredExpectedWithDups = expected.stream().map(fi -> new FrequentItemSet(fi.getFields().entrySet().stream().map(keyValues -> tuple(keyValues.getKey(), keyValues.getValue().stream().filter(v -> v.equals(stringExclude) == false).collect(Collectors.toList()))).map(keyValues -> tuple(keyValues.v1(), keyValues.v2().stream().filter(v -> v.equals(intExclude) == false).collect(Collectors.toList()))).filter(t -> t.v2().size() > 0).collect(Collectors.toMap(Tuple::v1, Tuple::v2)), fi.getDocCount(), fi.getSupport())).filter(fi -> fi.getSupport() >= minSupport).filter(fi -> {
        return fi.getFields().values().stream().map(v -> v.stream().count()).mapToLong(e -> e.longValue()).sum() >= minimumSetSize;
    }).sorted((a, b) -> {
        if (a.getDocCount() == b.getDocCount()) {
            if (b.getFields().size() == a.getFields().size()) {
                // Final tie-breaker: lexicographic order of a stable string form.
                return Strings.collectionToCommaDelimitedString(a.getFields().entrySet().stream().map(e -> e.getKey() + ": " + e.getValue()).sorted(String::compareTo).collect(Collectors.toList())).compareTo(Strings.collectionToCommaDelimitedString(b.getFields().entrySet().stream().map(e -> e.getKey() + ": " + e.getValue()).sorted(String::compareTo).collect(Collectors.toList())));
            }
            return b.getFields().size() - a.getFields().size();
        }
        return (int) (b.getDocCount() - a.getDocCount());
    }).collect(Collectors.toList());
    // Deduplicate by a hash over the field entries; only the first occurrence
    // of each distinct set survives.
    List<FrequentItemSet> filteredExpected = new ArrayList<>();
    Set<Integer> valuesSeen = new HashSet<>();
    for (FrequentItemSet fi : filteredExpectedWithDups) {
        if (valuesSeen.add(fi.getFields().entrySet().stream().mapToInt(v -> Objects.hash(v.getKey(), v.getValue())).reduce(13, (t, s) -> 41 * t + s))) {
            filteredExpected.add(fi);
        }
    }
    // When truncating to `size`, keep any trailing sets whose field count ties
    // with the set at the cut position; the aggregation may return any of them.
    int additionalSetsThatShareTheSameDocCount = 0;
    if (size < filteredExpected.size()) {
        int sizeAtCut = filteredExpected.get(size - 1).getFields().size();
        int startCutPosition = size;
        while (startCutPosition < filteredExpected.size() && filteredExpected.get(startCutPosition).getFields().size() == sizeAtCut) {
            ++startCutPosition;
            ++additionalSetsThatShareTheSameDocCount;
        }
        filteredExpected = filteredExpected.subList(0, startCutPosition);
    }
    String setsAssertMessage = "expected: [" + Strings.collectionToDelimitedString(filteredExpected, ", ") + "] got [" + Strings.arrayToDelimitedString(actual, ", ") + "] parameters: minumum_support: " + minSupport + " minimum_set_size: " + minimumSetSize + " size: " + size + " string exclude: [" + stringExclude + "] int exclude: [" + intExclude + "]";
    assertEquals("number of results do not match, " + setsAssertMessage, filteredExpected.size() - additionalSetsThatShareTheSameDocCount, actual.length);
    // Match each expected set against the remaining actual sets, removing a
    // matched actual set so it cannot satisfy two expected sets.
    List<FrequentItemSet> unmatchedActual = new ArrayList<>(Arrays.asList(actual));
    for (FrequentItemSet expectedSet : filteredExpected) {
        boolean foundSet = false;
        for (int i = 0; i < unmatchedActual.size(); ++i) {
            // NOTE(review): these asserts run against every remaining candidate,
            // not just the eventual match, so they implicitly assume all
            // candidates at this point share doc count / support / field count
            // with the expected set — confirm this is intended.
            assertEquals("did not find item [" + expectedSet + "], " + setsAssertMessage, unmatchedActual.get(i).getDocCount(), expectedSet.getDocCount());
            assertEquals("did not find item [" + expectedSet + "], " + setsAssertMessage, unmatchedActual.get(i).getSupport(), expectedSet.getSupport(), 0.00001);
            assertEquals("did not find item in the expected order(longer first) [" + expectedSet + "], all sets: " + setsAssertMessage, expectedSet.getFields().size(), unmatchedActual.get(i).getFields().size());
            if (expectedSet.getFields().keySet().equals(unmatchedActual.get(i).getFields().keySet())) {
                boolean matchedAllValuesForOneField = false;
                for (Entry<String, List<Object>> entry : expectedSet.getFields().entrySet()) {
                    // NOTE(review): if this resolves to the varargs overload
                    // containsInAnyOrder(T...), the whole List is treated as a
                    // single expected element rather than compared element-wise;
                    // verify the intended Hamcrest overload. The removal branch
                    // below also fires when this flag is false, which reads
                    // inverted relative to the variable name — confirm.
                    if (containsInAnyOrder(entry.getValue()).matches(unmatchedActual.get(i).getFields().get(entry.getKey()))) {
                        matchedAllValuesForOneField = true;
                    }
                    if (matchedAllValuesForOneField) {
                        break;
                    }
                }
                if (matchedAllValuesForOneField == false) {
                    unmatchedActual.remove(i);
                    foundSet = true;
                    break;
                }
            }
        }
        // Expected sets that tied at the truncation boundary are allowed to be
        // absent, consuming the tie budget instead of failing.
        if (foundSet == false && additionalSetsThatShareTheSameDocCount > 0) {
            --additionalSetsThatShareTheSameDocCount;
        } else {
            assertTrue("did not find item [" + expectedSet + "], " + setsAssertMessage, foundSet);
        }
    }
    assertEquals("more items found than expected, " + setsAssertMessage, 0, unmatchedActual.size());
}
255079.471163elasticsearch
/**
 * Verifies how the RRF coordinator breaks ties when merged documents end up
 * with equal combined RRF scores.
 * <p>
 * Three scenarios are exercised against the same coordinator context
 * (window size 4, from 0, rank constant 5, rank window 1):
 * <ol>
 *   <li>one doc per shard, both ranked by both queries — equal combined
 *       scores are ordered by per-query score/shard position;</li>
 *   <li>docs ranked by only one query competing with docs ranked by both —
 *       the doubly-ranked docs win despite lower raw scores;</li>
 *   <li>every doc ranked by exactly one query — ties resolve by raw score
 *       and then shard order.</li>
 * </ol>
 */
public void testCoordinatorRankTieBreaker() {
    RRFQueryPhaseRankCoordinatorContext context = new RRFQueryPhaseRankCoordinatorContext(4, 0, 5, 1);
    // Scenario 1: shard 1 and shard 2 each return one doc, both docs ranked
    // first by both queries, so their combined RRF scores tie exactly.
    QuerySearchResult qsr0 = new QuerySearchResult();
    qsr0.setShardIndex(1);
    RRFRankDoc rd11 = new RRFRankDoc(1, -1, 2);
    rd11.positions[0] = 0;
    rd11.positions[1] = 0;
    rd11.scores[0] = 10.0f;
    rd11.scores[1] = 7.0f;
    qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11 }));
    QuerySearchResult qsr1 = new QuerySearchResult();
    qsr1.setShardIndex(2);
    RRFRankDoc rd21 = new RRFRankDoc(1, -1, 2);
    rd21.positions[0] = 0;
    rd21.positions[1] = 0;
    rd21.scores[0] = 9.0f;
    rd21.scores[1] = 8.0f;
    qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21 }));
    TopDocsStats tds = new TopDocsStats(0);
    ScoreDoc[] scoreDocs = context.rankQueryPhaseResults(List.of(qsr0, qsr1), tds);
    assertEquals(2, tds.fetchHits);
    assertEquals(2, scoreDocs.length);
    // Both docs share the combined score; shard 1's doc is ranked first.
    RRFRankDoc expected = new RRFRankDoc(1, 1, 2);
    expected.rank = 1;
    expected.positions[0] = 0;
    expected.positions[1] = 1;
    expected.scores[0] = 10.0f;
    expected.scores[1] = 7.0f;
    expected.score = 0.8333333730697632f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[0]);
    expected = new RRFRankDoc(1, 2, 2);
    expected.rank = 2;
    expected.positions[0] = 1;
    expected.positions[1] = 0;
    expected.scores[0] = 9.0f;
    expected.scores[1] = 8.0f;
    expected.score = 0.8333333730697632f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[1]);
    // Scenario 2: each shard returns one doc ranked by a single query
    // (position -1 / score 0 for the other) and one doc ranked by both.
    qsr0 = new QuerySearchResult();
    qsr0.setShardIndex(1);
    rd11 = new RRFRankDoc(1, -1, 2);
    rd11.positions[0] = 0;
    rd11.positions[1] = -1;
    rd11.scores[0] = 10.0f;
    rd11.scores[1] = 0.0f;
    RRFRankDoc rd12 = new RRFRankDoc(2, -1, 2);
    rd12.positions[0] = 0;
    rd12.positions[1] = 1;
    rd12.scores[0] = 9.0f;
    rd12.scores[1] = 7.0f;
    qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11, rd12 }));
    qsr1 = new QuerySearchResult();
    qsr1.setShardIndex(2);
    rd21 = new RRFRankDoc(1, -1, 2);
    rd21.positions[0] = -1;
    rd21.positions[1] = 0;
    rd21.scores[0] = 0.0f;
    rd21.scores[1] = 11.0f;
    RRFRankDoc rd22 = new RRFRankDoc(2, -1, 2);
    rd22.positions[0] = 0;
    rd22.positions[1] = 1;
    rd22.scores[0] = 9.0f;
    rd22.scores[1] = 9.0f;
    qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21, rd22 }));
    tds = new TopDocsStats(0);
    scoreDocs = context.rankQueryPhaseResults(List.of(qsr0, qsr1), tds);
    assertEquals(4, tds.fetchHits);
    assertEquals(4, scoreDocs.length);
    // Docs ranked by both queries outrank docs ranked by only one, even
    // though the singly-ranked docs carry higher raw scores.
    expected = new RRFRankDoc(2, 2, 2);
    expected.rank = 1;
    expected.positions[0] = 2;
    expected.positions[1] = 1;
    expected.scores[0] = 9.0f;
    expected.scores[1] = 9.0f;
    expected.score = 0.5833333730697632f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[0]);
    expected = new RRFRankDoc(2, 1, 2);
    expected.rank = 2;
    expected.positions[0] = 1;
    expected.positions[1] = 2;
    expected.scores[0] = 9.0f;
    expected.scores[1] = 7.0f;
    expected.score = 0.5833333730697632f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[1]);
    expected = new RRFRankDoc(1, 1, 2);
    expected.rank = 3;
    expected.positions[0] = 0;
    expected.positions[1] = -1;
    expected.scores[0] = 10.0f;
    expected.scores[1] = 0.0f;
    expected.score = 0.5f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[2]);
    expected = new RRFRankDoc(1, 2, 2);
    expected.rank = 4;
    expected.positions[0] = -1;
    expected.positions[1] = 0;
    expected.scores[0] = 0.0f;
    expected.scores[1] = 11.0f;
    expected.score = 0.5f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[3]);
    // Scenario 3: every doc is ranked by exactly one query; ties between the
    // two queries resolve by raw score, then by shard order.
    qsr0 = new QuerySearchResult();
    qsr0.setShardIndex(1);
    rd11 = new RRFRankDoc(1, -1, 2);
    rd11.positions[0] = 0;
    rd11.positions[1] = -1;
    rd11.scores[0] = 10.0f;
    rd11.scores[1] = 0.0f;
    rd12 = new RRFRankDoc(2, -1, 2);
    rd12.positions[0] = -1;
    rd12.positions[1] = 0;
    rd12.scores[0] = 0.0f;
    rd12.scores[1] = 12.0f;
    qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11, rd12 }));
    qsr1 = new QuerySearchResult();
    qsr1.setShardIndex(2);
    rd21 = new RRFRankDoc(1, -1, 2);
    rd21.positions[0] = 0;
    rd21.positions[1] = -1;
    rd21.scores[0] = 3.0f;
    rd21.scores[1] = 0.0f;
    rd22 = new RRFRankDoc(2, -1, 2);
    rd22.positions[0] = -1;
    rd22.positions[1] = 0;
    rd22.scores[0] = 0.0f;
    rd22.scores[1] = 5.0f;
    qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21, rd22 }));
    tds = new TopDocsStats(0);
    scoreDocs = context.rankQueryPhaseResults(List.of(qsr0, qsr1), tds);
    assertEquals(4, tds.fetchHits);
    assertEquals(4, scoreDocs.length);
    expected = new RRFRankDoc(1, 1, 2);
    expected.rank = 1;
    expected.positions[0] = 0;
    expected.positions[1] = -1;
    expected.scores[0] = 10.0f;
    expected.scores[1] = 0.0f;
    expected.score = 0.5f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[0]);
    expected = new RRFRankDoc(2, 1, 2);
    expected.rank = 2;
    expected.positions[0] = -1;
    expected.positions[1] = 0;
    expected.scores[0] = 0.0f;
    expected.scores[1] = 12.0f;
    expected.score = 0.5f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[1]);
    expected = new RRFRankDoc(1, 2, 2);
    expected.rank = 3;
    expected.positions[0] = 1;
    expected.positions[1] = -1;
    expected.scores[0] = 3.0f;
    expected.scores[1] = 0.0f;
    expected.score = 0.3333333333333333f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[2]);
    expected = new RRFRankDoc(2, 2, 2);
    expected.rank = 4;
    expected.positions[0] = -1;
    expected.positions[1] = 1;
    expected.scores[0] = 0.0f;
    expected.scores[1] = 5.0f;
    expected.score = 0.3333333333333333f;
    assertRDEquals(expected, (RRFRankDoc) scoreDocs[3]);
}
253185.048176elasticsearch
/**
 * Wires up the fixture for SAML session-invalidation tests: a SAML realm
 * backed by the idp1.xml metadata, a {@code TokenService} over a stubbed
 * client, and the {@code TransportSamlInvalidateSessionAction} under test.
 * <p>
 * The anonymous {@code NoOpClient} records every index/bulk/search request
 * into {@code indexRequests}/{@code bulkRequests}/{@code searchRequests} and
 * answers with canned successful responses; search hits are produced by the
 * pluggable {@code searchFunction}. The security index mock runs its
 * "prepare/check then execute" callbacks inline so no real index is needed.
 */
public void setup() throws Exception {
    final RealmIdentifier realmId = new RealmIdentifier("saml", REALM_NAME);
    final Path metadata = PathUtils.get(SamlRealm.class.getResource("idp1.xml").toURI());
    // Realm + token-service settings: SAML realm pointed at local IdP
    // metadata, with the token service explicitly enabled.
    final Settings settings = Settings.builder().put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).put("path.home", createTempDir()).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_METADATA_PATH), metadata.toString()).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_ENTITY_ID), SamlRealmTests.TEST_IDP_ENTITY_ID).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.SP_ENTITY_ID), SamlRealmTestHelper.SP_ENTITY_ID).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.SP_ACS), SamlRealmTestHelper.SP_ACS_URL).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.SP_LOGOUT), SamlRealmTestHelper.SP_LOGOUT_URL).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "uid").put(getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0).build();
    this.threadPool = new TestThreadPool("saml test thread pool", settings);
    final ThreadContext threadContext = threadPool.getThreadContext();
    // Put an authenticated "kibana" user into the thread context so the
    // action sees an authenticated caller.
    AuthenticationTestHelper.builder().user(new User("kibana")).realmRef(new RealmRef("realm", "type", "node")).build(false).writeToContext(threadContext);
    indexRequests = new ArrayList<>();
    searchRequests = new ArrayList<>();
    bulkRequests = new ArrayList<>();
    final Client client = new NoOpClient(threadPool) {

        @Override
        @SuppressWarnings("unchecked")
        protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action, Request request, ActionListener<Response> listener) {
            if (TransportIndexAction.NAME.equals(action.name())) {
                // Record the index request and acknowledge with a created doc.
                assertThat(request, instanceOf(IndexRequest.class));
                IndexRequest indexRequest = (IndexRequest) request;
                indexRequests.add(indexRequest);
                final IndexResponse response = new IndexResponse(new ShardId("test", "test", 0), indexRequest.id(), 1, 1, 1, true);
                listener.onResponse((Response) response);
            } else if (TransportBulkAction.NAME.equals(action.name())) {
                // Record the bulk request and answer every item with a mocked
                // successful update (randomly UPDATED or NOOP).
                assertThat(request, instanceOf(BulkRequest.class));
                BulkRequest bulkRequest = (BulkRequest) request;
                bulkRequests.add(bulkRequest);
                BulkItemResponse[] bulkItemResponses = new BulkItemResponse[bulkRequest.requests().size()];
                for (int i = 0; i < bulkItemResponses.length; i++) {
                    UpdateResponse updateResponse = mock(UpdateResponse.class);
                    DocWriteResponse.Result docWriteResponse = randomFrom(DocWriteResponse.Result.UPDATED, DocWriteResponse.Result.NOOP);
                    when(updateResponse.getResult()).thenReturn(docWriteResponse);
                    GetResult getResult = mock(GetResult.class);
                    when(getResult.getId()).thenReturn(bulkRequest.requests().get(i).id());
                    when(updateResponse.getGetResult()).thenReturn(getResult);
                    bulkItemResponses[i] = BulkItemResponse.success(i, DocWriteRequest.OpType.UPDATE, updateResponse);
                }
                BulkResponse response = new BulkResponse(bulkItemResponses, 1);
                listener.onResponse((Response) response);
            } else if (TransportSearchAction.TYPE.name().equals(action.name())) {
                // Produce hits via the test-controlled searchFunction; release
                // our ref after handing the response to the listener.
                assertThat(request, instanceOf(SearchRequest.class));
                SearchRequest searchRequest = (SearchRequest) request;
                searchRequests.add(searchRequest);
                final SearchHit[] hits = searchFunction.apply(searchRequest);
                final var searchHits = new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f);
                try {
                    ActionListener.respondAndRelease(listener, (Response) new SearchResponse(searchHits, null, null, false, false, null, 1, "_scrollId1", 1, 1, 0, 1, null, null));
                } finally {
                    searchHits.decRef();
                }
            } else if (TransportSearchScrollAction.TYPE.name().equals(action.name())) {
                // Scrolling always returns an empty page, terminating scans.
                assertThat(request, instanceOf(SearchScrollRequest.class));
                ActionListener.respondAndRelease(listener, (Response) new SearchResponse(SearchHits.EMPTY_WITH_TOTAL_HITS, null, null, false, false, null, 1, "_scrollId1", 1, 1, 0, 1, null, null));
            } else if (TransportClearScrollAction.NAME.equals(action.name())) {
                // Verify the scroll id we handed out is the one being cleared.
                assertThat(request, instanceOf(ClearScrollRequest.class));
                ClearScrollRequest scrollRequest = (ClearScrollRequest) request;
                assertEquals("_scrollId1", scrollRequest.getScrollIds().get(0));
                ClearScrollResponse response = new ClearScrollResponse(true, 1);
                listener.onResponse((Response) response);
            } else if (RefreshAction.NAME.equals(action.name())) {
                assertThat(request, instanceOf(RefreshRequest.class));
                listener.onResponse((Response) mock(BroadcastResponse.class));
            } else {
                super.doExecute(action, request, listener);
            }
        }
    };
    // Security index mock: run the "then execute" callbacks synchronously and
    // report a fully available, up-to-date index.
    final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class);
    doAnswer(inv -> {
        ((Runnable) inv.getArguments()[1]).run();
        return null;
    }).when(securityIndex).prepareIndexIfNeededThenExecute(anyConsumer(), any(Runnable.class));
    doAnswer(inv -> {
        ((Runnable) inv.getArguments()[1]).run();
        return null;
    }).when(securityIndex).checkIndexVersionThenExecute(anyConsumer(), any(Runnable.class));
    when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true);
    when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true);
    when(securityIndex.indexExists()).thenReturn(true);
    when(securityIndex.isIndexUpToDate()).thenReturn(true);
    when(securityIndex.getCreationTime()).thenReturn(Clock.systemUTC().instant());
    when(securityIndex.aliasName()).thenReturn(".security");
    when(securityIndex.defensiveCopy()).thenReturn(securityIndex);
    final MockLicenseState licenseState = mock(MockLicenseState.class);
    when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true);
    final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    final SecurityContext securityContext = new SecurityContext(settings, threadContext);
    // The TokenService uses the same mocked index for both main and tokens
    // indices; the action under test wraps it together with the realms mock.
    tokenService = new TokenService(settings, Clock.systemUTC(), client, licenseState, securityContext, securityIndex, securityIndex, clusterService);
    final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
    final Realms realms = mock(Realms.class);
    action = new TransportSamlInvalidateSessionAction(transportService, mock(ActionFilters.class), tokenService, realms);
    final Environment env = TestEnvironment.newEnvironment(settings);
    final RealmConfig realmConfig = new RealmConfig(realmId, settings, env, threadContext);
    samlRealm = SamlRealmTestHelper.buildRealm(realmConfig, null);
    when(realms.realm(realmConfig.name())).thenReturn(samlRealm);
    when(realms.stream()).thenAnswer(i -> Stream.of(samlRealm));
    // Canned logout request returned by the realm's logout handler for any
    // query string the action parses.
    logoutRequest = new SamlLogoutRequestHandler.Result(randomAlphaOfLengthBetween(8, 24), new SamlNameId(NameID.TRANSIENT, randomAlphaOfLengthBetween(8, 24), null, null, null), randomAlphaOfLengthBetween(12, 16), null);
    when(samlRealm.getLogoutHandler().parseFromQueryString(anyString())).thenReturn(logoutRequest);
}
253155.832185elasticsearch
/**
 * Exercises {@code ServiceAccountService.tryParseToken} across malformed and
 * well-formed bearer strings, verifying both the null return and the TRACE
 * log line emitted for each failure mode: too-short input, wrong 4-byte
 * prefix, missing ':' separator, malformed qualified name, and invalid token
 * name. TRACE logging is enabled for the duration of the test and restored
 * to INFO in the finally block.
 */
public void testTryParseToken() throws IOException, IllegalAccessException {
    assertNull(ServiceAccountService.tryParseToken(null));
    // The 4 leading bytes every service account token must start with.
    final byte[] magicBytes = { 0, 1, 0, 1 };
    final Logger satLogger = LogManager.getLogger(ServiceAccountToken.class);
    Loggers.setLevel(satLogger, Level.TRACE);
    final Logger sasLogger = LogManager.getLogger(ServiceAccountService.class);
    Loggers.setLevel(sasLogger, Level.TRACE);
    try (var satAppender = MockLogAppender.capture(ServiceAccountToken.class);
        var sasAppender = MockLogAppender.capture(ServiceAccountService.class)) {
        // Case: fewer than 4 bytes of input.
        satAppender.addExpectation(new MockLogAppender.SeenEventExpectation("less than 4 bytes", ServiceAccountToken.class.getName(), Level.TRACE, "service account token expects the 4 leading bytes"));
        final SecureString bearerString0 = createBearerString(List.of(Arrays.copyOfRange(magicBytes, 0, randomIntBetween(0, 3))));
        assertNull(ServiceAccountService.tryParseToken(bearerString0));
        satAppender.assertAllExpectationsMatched();
        // Case: long enough but the first byte differs from the magic prefix.
        satAppender.addExpectation(new MockLogAppender.SeenEventExpectation("prefix mismatch", ServiceAccountToken.class.getName(), Level.TRACE, "service account token expects the 4 leading bytes"));
        final SecureString bearerString1 = createBearerString(List.of(new byte[] { randomValueOtherThan((byte) 0, ESTestCase::randomByte) }, randomByteArrayOfLength(randomIntBetween(30, 50))));
        assertNull(ServiceAccountService.tryParseToken(bearerString1));
        satAppender.assertAllExpectationsMatched();
        // Case: valid prefix but no ':' between qualified name and secret.
        satAppender.addExpectation(new MockLogAppender.SeenEventExpectation("no colon", ServiceAccountToken.class.getName(), Level.TRACE, "failed to extract qualified service token name and secret, missing ':'"));
        final SecureString bearerString2 = createBearerString(List.of(magicBytes, randomAlphaOfLengthBetween(30, 50).getBytes(StandardCharsets.UTF_8)));
        assertNull(ServiceAccountService.tryParseToken(bearerString2));
        satAppender.assertAllExpectationsMatched();
        // Case: qualified name does not have the namespace/service/token shape
        // (either no '/' at all, or only one).
        satAppender.addExpectation(new MockLogAppender.SeenEventExpectation("invalid delimiter for qualified name", ServiceAccountToken.class.getName(), Level.TRACE, "The qualified name of a service token should take format of 'namespace/service_name/token_name'"));
        if (randomBoolean()) {
            final SecureString bearerString3 = createBearerString(List.of(magicBytes, (randomAlphaOfLengthBetween(10, 20) + ":" + randomAlphaOfLengthBetween(10, 20)).getBytes(StandardCharsets.UTF_8)));
            assertNull(ServiceAccountService.tryParseToken(bearerString3));
        } else {
            final SecureString bearerString3 = createBearerString(List.of(magicBytes, (randomAlphaOfLengthBetween(3, 8) + "/" + randomAlphaOfLengthBetween(3, 8) + ":" + randomAlphaOfLengthBetween(10, 20)).getBytes(StandardCharsets.UTF_8)));
            assertNull(ServiceAccountService.tryParseToken(bearerString3));
        }
        satAppender.assertAllExpectationsMatched();
        // Case: well-formed structure but the token name itself is invalid;
        // this one logs from the service, not the token class.
        sasAppender.addExpectation(new MockLogAppender.SeenEventExpectation("invalid token name", ServiceAccountService.class.getName(), Level.TRACE, "Cannot parse possible service account token"));
        final SecureString bearerString4 = createBearerString(List.of(magicBytes, (randomAlphaOfLengthBetween(3, 8) + "/" + randomAlphaOfLengthBetween(3, 8) + "/" + randomValueOtherThanMany(n -> n.contains("/"), ValidationTests::randomInvalidTokenName) + ":" + randomAlphaOfLengthBetween(10, 20)).getBytes(StandardCharsets.UTF_8)));
        assertNull(ServiceAccountService.tryParseToken(bearerString4));
        sasAppender.assertAllExpectationsMatched();
        // Happy path: a fully valid token parses, equals an equivalent token
        // built directly, and round-trips through asBearerString().
        final String namespace = randomAlphaOfLengthBetween(3, 8);
        final String serviceName = randomAlphaOfLengthBetween(3, 8);
        final String tokenName = ValidationTests.randomTokenName();
        final ServiceAccountId accountId = new ServiceAccountId(namespace, serviceName);
        final String secret = randomAlphaOfLengthBetween(10, 20);
        final SecureString bearerString5 = createBearerString(List.of(magicBytes, (namespace + "/" + serviceName + "/" + tokenName + ":" + secret).getBytes(StandardCharsets.UTF_8)));
        final ServiceAccountToken serviceAccountToken1 = ServiceAccountService.tryParseToken(bearerString5);
        final ServiceAccountToken serviceAccountToken2 = new ServiceAccountToken(accountId, tokenName, new SecureString(secret.toCharArray()));
        assertThat(serviceAccountToken1, equalTo(serviceAccountToken2));
        final ServiceAccountToken parsedToken = ServiceAccountService.tryParseToken(serviceAccountToken2.asBearerString());
        assertThat(parsedToken, equalTo(serviceAccountToken2));
        // The same failure modes again, but fed with fixed pre-encoded
        // base64 bearer strings instead of randomly built ones.
        satAppender.addExpectation(new MockLogAppender.SeenEventExpectation("invalid magic byte again", ServiceAccountToken.class.getName(), Level.TRACE, "service account token expects the 4 leading bytes"));
        assertNull(ServiceAccountService.tryParseToken(new SecureString("AQEAAWVsYXN0aWMvZmxlZXQvdG9rZW4xOnN1cGVyc2VjcmV0".toCharArray())));
        satAppender.assertAllExpectationsMatched();
        satAppender.addExpectation(new MockLogAppender.SeenEventExpectation("no colon again", ServiceAccountToken.class.getName(), Level.TRACE, "failed to extract qualified service token name and secret, missing ':'"));
        assertNull(ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXQvdG9rZW4xX3N1cGVyc2VjcmV0".toCharArray())));
        satAppender.assertAllExpectationsMatched();
        satAppender.addExpectation(new MockLogAppender.SeenEventExpectation("invalid delimiter for qualified name again", ServiceAccountToken.class.getName(), Level.TRACE, "The qualified name of a service token should take format of 'namespace/service_name/token_name'"));
        assertNull(ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXRfdG9rZW4xOnN1cGVyc2VjcmV0".toCharArray())));
        satAppender.assertAllExpectationsMatched();
        sasAppender.addExpectation(new MockLogAppender.SeenEventExpectation("invalid token name again", ServiceAccountService.class.getName(), Level.TRACE, "Cannot parse possible service account token"));
        assertNull(ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXQvdG9rZW4hOnN1cGVyc2VjcmV0".toCharArray())));
        sasAppender.assertAllExpectationsMatched();
        // And a valid fixed bearer string parses to the expected token.
        assertThat(ServiceAccountService.tryParseToken(new SecureString("AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL3Rva2VuMTpzdXBlcnNlY3JldA".toCharArray())), equalTo(new ServiceAccountToken(new ServiceAccountId("elastic", "fleet-server"), "token1", new SecureString("supersecret".toCharArray()))));
    } finally {
        // Restore default log levels regardless of test outcome.
        Loggers.setLevel(satLogger, Level.INFO);
        Loggers.setLevel(sasLogger, Level.INFO);
    }
}
254256.182179elasticsearch
/**
 * Exercises {@code ServiceAccountService.authenticateToken} end to end:
 * rejection of non-elastic namespaces, unknown elastic service names, and
 * too-short secrets; then successful and failed credential checks against a
 * (randomly chosen) file- or index-backed token store. Each rejection is
 * asserted both via the thrown {@link ElasticsearchSecurityException} and via
 * a DEBUG log event captured by a {@code MockLogAppender}.
 */
public void testAuthenticateWithToken() throws ExecutionException, InterruptedException, IllegalAccessException {
    // Raise the service logger to TRACE so the appender can observe the DEBUG events asserted below.
    final Logger sasLogger = LogManager.getLogger(ServiceAccountService.class);
    Loggers.setLevel(sasLogger, Level.TRACE);
    try (var appender = MockLogAppender.capture(ServiceAccountService.class)) {
        // Scenario 1: any namespace other than "elastic" must be rejected with a DEBUG log.
        final ServiceAccountId accountId1 = new ServiceAccountId(randomValueOtherThan(ElasticServiceAccounts.NAMESPACE, () -> randomAlphaOfLengthBetween(3, 8)), randomAlphaOfLengthBetween(3, 8));
        appender.addExpectation(new MockLogAppender.SeenEventExpectation("non-elastic service account", ServiceAccountService.class.getName(), Level.DEBUG, "only [elastic] service accounts are supported, but received [" + accountId1.asPrincipal() + "]"));
        final SecureString secret = new SecureString(randomAlphaOfLength(20).toCharArray());
        final ServiceAccountToken token1 = new ServiceAccountToken(accountId1, randomAlphaOfLengthBetween(3, 8), secret);
        final PlainActionFuture<Authentication> future1 = new PlainActionFuture<>();
        serviceAccountService.authenticateToken(token1, randomAlphaOfLengthBetween(3, 8), future1);
        final ExecutionException e1 = expectThrows(ExecutionException.class, future1::get);
        assertThat(e1.getCause().getClass(), is(ElasticsearchSecurityException.class));
        assertThat(e1.getMessage(), containsString("failed to authenticate service account [" + token1.getAccountId().asPrincipal() + "] with token name [" + token1.getTokenName() + "]"));
        appender.assertAllExpectationsMatched();
        // Scenario 2: "elastic" namespace but a service name other than "fleet-server" does not exist.
        final ServiceAccountId accountId2 = new ServiceAccountId(ElasticServiceAccounts.NAMESPACE, randomValueOtherThan("fleet-server", () -> randomAlphaOfLengthBetween(3, 8)));
        appender.addExpectation(new MockLogAppender.SeenEventExpectation("unknown elastic service name", ServiceAccountService.class.getName(), Level.DEBUG, "the [" + accountId2.asPrincipal() + "] service account does not exist"));
        final ServiceAccountToken token2 = new ServiceAccountToken(accountId2, randomAlphaOfLengthBetween(3, 8), secret);
        final PlainActionFuture<Authentication> future2 = new PlainActionFuture<>();
        serviceAccountService.authenticateToken(token2, randomAlphaOfLengthBetween(3, 8), future2);
        final ExecutionException e2 = expectThrows(ExecutionException.class, future2::get);
        assertThat(e2.getCause().getClass(), is(ElasticsearchSecurityException.class));
        assertThat(e2.getMessage(), containsString("failed to authenticate service account [" + token2.getAccountId().asPrincipal() + "] with token name [" + token2.getTokenName() + "]"));
        appender.assertAllExpectationsMatched();
        // Scenario 3: a secret shorter than the 10-character minimum is rejected before any store lookup.
        final ServiceAccountId accountId3 = new ServiceAccountId(ElasticServiceAccounts.NAMESPACE, "fleet-server");
        final SecureString secret3 = new SecureString(randomAlphaOfLengthBetween(1, 9).toCharArray());
        final ServiceAccountToken token3 = new ServiceAccountToken(accountId3, randomAlphaOfLengthBetween(3, 8), secret3);
        appender.addExpectation(new MockLogAppender.SeenEventExpectation("secret value too short", ServiceAccountService.class.getName(), Level.DEBUG, "the provided credential has length [" + secret3.length() + "] but a token's secret value must be at least [10] characters"));
        final PlainActionFuture<Authentication> future3 = new PlainActionFuture<>();
        serviceAccountService.authenticateToken(token3, randomAlphaOfLengthBetween(3, 8), future3);
        final ExecutionException e3 = expectThrows(ExecutionException.class, future3::get);
        assertThat(e3.getCause().getClass(), is(ElasticsearchSecurityException.class));
        assertThat(e3.getMessage(), containsString("failed to authenticate service account [" + token3.getAccountId().asPrincipal() + "] with token name [" + token3.getTokenName() + "]"));
        appender.assertAllExpectationsMatched();
        // Randomly pick which backing store (file or index) will be the one that answers;
        // the other store always reports a failed authentication.
        final TokenInfo.TokenSource tokenSource = randomFrom(TokenInfo.TokenSource.values());
        final CachingServiceAccountTokenStore store;
        final CachingServiceAccountTokenStore otherStore;
        if (tokenSource == TokenInfo.TokenSource.FILE) {
            store = fileServiceAccountTokenStore;
            otherStore = indexServiceAccountTokenStore;
        } else {
            store = indexServiceAccountTokenStore;
            otherStore = fileServiceAccountTokenStore;
        }
        // token4 will be accepted by the chosen store, token5 (different secret) rejected.
        final ServiceAccountId accountId4 = new ServiceAccountId(ElasticServiceAccounts.NAMESPACE, "fleet-server");
        final ServiceAccountToken token4 = new ServiceAccountToken(accountId4, randomAlphaOfLengthBetween(3, 8), secret);
        final ServiceAccountToken token5 = new ServiceAccountToken(accountId4, randomAlphaOfLengthBetween(3, 8), new SecureString(randomAlphaOfLength(20).toCharArray()));
        final String nodeName = randomAlphaOfLengthBetween(3, 8);
        // Stub the chosen store: success for token4, failure for token5.
        doAnswer(invocationOnMock -> {
            @SuppressWarnings("unchecked")
            final ActionListener<ServiceAccountTokenStore.StoreAuthenticationResult> listener = (ActionListener<ServiceAccountTokenStore.StoreAuthenticationResult>) invocationOnMock.getArguments()[1];
            listener.onResponse(new ServiceAccountTokenStore.StoreAuthenticationResult(true, store.getTokenSource()));
            return null;
        }).when(store).authenticate(eq(token4), any());
        doAnswer(invocationOnMock -> {
            @SuppressWarnings("unchecked")
            final ActionListener<ServiceAccountTokenStore.StoreAuthenticationResult> listener = (ActionListener<ServiceAccountTokenStore.StoreAuthenticationResult>) invocationOnMock.getArguments()[1];
            listener.onResponse(new ServiceAccountTokenStore.StoreAuthenticationResult(false, store.getTokenSource()));
            return null;
        }).when(store).authenticate(eq(token5), any());
        // The non-chosen store always fails, regardless of token.
        doAnswer(invocationOnMock -> {
            @SuppressWarnings("unchecked")
            final ActionListener<ServiceAccountTokenStore.StoreAuthenticationResult> listener = (ActionListener<ServiceAccountTokenStore.StoreAuthenticationResult>) invocationOnMock.getArguments()[1];
            listener.onResponse(new ServiceAccountTokenStore.StoreAuthenticationResult(false, otherStore.getTokenSource()));
            return null;
        }).when(otherStore).authenticate(any(), any());
        // Success path: the resulting Authentication carries the fleet-server user plus
        // token-name and token-source metadata.
        final PlainActionFuture<Authentication> future4 = new PlainActionFuture<>();
        serviceAccountService.authenticateToken(token4, nodeName, future4);
        final Authentication authentication = future4.get();
        assertThat(authentication, equalTo(Authentication.newServiceAccountAuthentication(new User("elastic/fleet-server", Strings.EMPTY_ARRAY, "Service account - elastic/fleet-server", null, Map.of("_elastic_service_account", true), true), nodeName, Map.of("_token_name", token4.getTokenName(), "_token_source", tokenSource.name().toLowerCase(Locale.ROOT)))));
        // Failure path: wrong secret fails authentication and logs at DEBUG.
        appender.addExpectation(new MockLogAppender.SeenEventExpectation("invalid credential", ServiceAccountService.class.getName(), Level.DEBUG, "failed to authenticate service account [" + token5.getAccountId().asPrincipal() + "] with token name [" + token5.getTokenName() + "]"));
        final PlainActionFuture<Authentication> future5 = new PlainActionFuture<>();
        serviceAccountService.authenticateToken(token5, nodeName, future5);
        final ExecutionException e5 = expectThrows(ExecutionException.class, future5::get);
        assertThat(e5.getCause().getClass(), is(ElasticsearchSecurityException.class));
        assertThat(e5.getMessage(), containsString("failed to authenticate service account [" + token5.getAccountId().asPrincipal() + "] with token name [" + token5.getTokenName() + "]"));
        appender.assertAllExpectationsMatched();
    } finally {
        // Restore the logger so later tests are not flooded with TRACE output.
        Loggers.setLevel(sasLogger, Level.INFO);
    }
}
253522.4720147elasticsearch
 /**
  * Computes the shard-migration portion of a node-shutdown status for {@code nodeId}.
  *
  * Outcomes, in evaluation order:
  * - RESTART shutdowns never require relocation (COMPLETE).
  * - A node that is gone and was never seen is NOT_STARTED.
  * - An unassigned shard last allocated to this node (with no usable copy elsewhere) means STALLED.
  * - A node with no routing entry, or no remaining shards, is COMPLETE.
  * - Otherwise the status is IN_PROGRESS or STALLED depending on whether every remaining
  *   STARTED shard can move (or can safely be ignored because a copy exists on another,
  *   non-shutting-down node).
  *
  * Does O(#shards) work, so it must run off the transport thread, and honors task
  * cancellation while iterating shards.
  */
 static ShutdownShardMigrationStatus shardMigrationStatus(CancellableTask cancellableTask, ClusterState currentState, String nodeId, SingleNodeShutdownMetadata.Type shutdownType, boolean nodeSeen, ClusterInfoService clusterInfoService, SnapshotsInfoService snapshotsInfoService, AllocationService allocationService, AllocationDeciders allocationDeciders) {
    assert Transports.assertNotTransportThread("doing O(#shards) work must be forked");
    // Restarting nodes keep their shards; nothing to migrate.
    if (SingleNodeShutdownMetadata.Type.RESTART.equals(shutdownType)) {
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.COMPLETE, 0, "no shard relocation is necessary for a node restart", null);
    }
    // Node not in the cluster and never observed: migration has not begun.
    if (currentState.nodes().get(nodeId) == null && nodeSeen == false) {
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.NOT_STARTED, 0, "node is not currently part of the cluster", null);
    }
    // Build an allocation view for explain calls; keep only non-YES decisions to cut noise.
    final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, currentState, clusterInfoService.getClusterInfo(), snapshotsInfoService.snapshotShardSizes(), System.nanoTime());
    allocation.setDebugMode(RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS);
    Set<String> shuttingDownNodes = currentState.metadata().nodeShutdowns().getAll().keySet();
    // Unassigned shards that were last on this node and are either primaries or have no
    // copy on another (non-shutting-down) node block the shutdown.
    var unassignedShards = currentState.getRoutingNodes().unassigned().stream().peek(s -> cancellableTask.ensureNotCancelled()).filter(s -> Objects.equals(s.unassignedInfo().getLastAllocatedNodeId(), nodeId)).filter(s -> s.primary() || hasShardCopyOnAnotherNode(currentState, s, shuttingDownNodes) == false).toList();
    if (unassignedShards.isEmpty() == false) {
        // Report the first such shard (with its allocation explanation) as the stall reason.
        var shardRouting = unassignedShards.get(0);
        ShardAllocationDecision decision = allocationService.explainShardAllocation(shardRouting, allocation);
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.STALLED, unassignedShards.size(), format("shard [%s] [%s] of index [%s] is unassigned, see [%s] for details or use the cluster allocation explain API", shardRouting.shardId().getId(), shardRouting.primary() ? "primary" : "replica", shardRouting.index().getName(), NODE_ALLOCATION_DECISION_KEY), decision);
    }
    // No routing entry for the node at all: nothing left to move.
    if (currentState.getRoutingNodes().node(nodeId) == null) {
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.COMPLETE, 0, 0, 0);
    }
    int startedShards = currentState.getRoutingNodes().node(nodeId).numberOfShardsWithState(ShardRoutingState.STARTED);
    int relocatingShards = currentState.getRoutingNodes().node(nodeId).numberOfShardsWithState(ShardRoutingState.RELOCATING);
    int initializingShards = currentState.getRoutingNodes().node(nodeId).numberOfShardsWithState(ShardRoutingState.INITIALIZING);
    int totalRemainingShards = relocatingShards + startedShards + initializingShards;
    // Either everything is gone (COMPLETE) or relocations are actively running (IN_PROGRESS).
    if (relocatingShards > 0 || totalRemainingShards == 0) {
        SingleNodeShutdownMetadata.Status shardStatus = totalRemainingShards == 0 ? SingleNodeShutdownMetadata.Status.COMPLETE : SingleNodeShutdownMetadata.Status.IN_PROGRESS;
        return new ShutdownShardMigrationStatus(shardStatus, startedShards, relocatingShards, initializingShards);
    } else if (initializingShards > 0 && relocatingShards == 0 && startedShards == 0) {
        // Only INITIALIZING shards remain; they must finish before they can be moved.
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.IN_PROGRESS, startedShards, relocatingShards, initializingShards, "all remaining shards are currently INITIALIZING and must finish before they can be moved off this node");
    }
    // Scan STARTED shards for one that genuinely cannot move: not merely throttled,
    // not about to move (YES), with no safe copy elsewhere, and not held back by ILM.
    // Shards with a copy on another node are counted so they can be ignored for the
    // final status instead of stalling the shutdown.
    AtomicInteger shardsToIgnoreForFinalStatus = new AtomicInteger(0);
    Optional<Tuple<ShardRouting, ShardAllocationDecision>> unmovableShard = currentState.getRoutingNodes().node(nodeId).shardsWithState(ShardRoutingState.STARTED).peek(s -> cancellableTask.ensureNotCancelled()).map(shardRouting -> new Tuple<>(shardRouting, allocationService.explainShardAllocation(shardRouting, allocation))).filter(pair -> {
        assert pair.v2().getMoveDecision().canRemain() == false : "shard [" + pair + "] can remain on node [" + nodeId + "], but that node is shutting down";
        return pair.v2().getMoveDecision().canRemain() == false;
    }).filter(pair -> pair.v2().getMoveDecision().getAllocationDecision().equals(AllocationDecision.THROTTLED) == false).filter(pair -> pair.v2().getMoveDecision().getAllocationDecision().equals(AllocationDecision.YES) == false).filter(pair -> {
        final boolean hasShardCopyOnOtherNode = hasShardCopyOnAnotherNode(currentState, pair.v1(), shuttingDownNodes);
        if (hasShardCopyOnOtherNode) {
            shardsToIgnoreForFinalStatus.incrementAndGet();
        }
        return hasShardCopyOnOtherNode == false;
    }).filter(pair -> isIlmRestrictingShardMovement(currentState, pair.v1()) == false).peek(pair -> logger.debug("node [{}] shutdown of type [{}] stalled: found shard [{}][{}] from index [{}] with negative decision: [{}]", nodeId, shutdownType, pair.v1().getId(), pair.v1().primary() ? "primary" : "replica", pair.v1().shardId().getIndexName(), Strings.toString(pair.v2()))).findFirst();
    if (totalRemainingShards == shardsToIgnoreForFinalStatus.get() && unmovableShard.isEmpty()) {
        // Every remaining shard is pinned here but safely replicated elsewhere.
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.COMPLETE, 0, "[" + shardsToIgnoreForFinalStatus.get() + "] shards cannot be moved away from this node but have at least one copy on another node in the cluster", null);
    } else if (unmovableShard.isPresent()) {
        // At least one shard truly cannot move: surface it with its allocation decision.
        ShardRouting shardRouting = unmovableShard.get().v1();
        ShardAllocationDecision decision = unmovableShard.get().v2();
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.STALLED, totalRemainingShards, format("shard [%s] [%s] of index [%s] cannot move, see [%s] for details or use the cluster allocation explain API", shardRouting.shardId().getId(), shardRouting.primary() ? "primary" : "replica", shardRouting.index().getName(), NODE_ALLOCATION_DECISION_KEY), decision);
    } else {
        return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.IN_PROGRESS, startedShards, relocatingShards, initializingShards);
    }
}
252871.9535129gwt
 /**
  * Builds the decimal string for {@code value * 10^(-scale)} using the same
  * formatting rules as {@code java.math.BigDecimal#toString()}: plain decimal
  * notation when {@code scale == 0}, or when {@code scale > 0} and the adjusted
  * exponent is at least -6; scientific notation (with an explicit '+' on
  * positive exponents) otherwise.
  *
  * <p>NOTE(review): the digit buffer holds at most 18 digits, so callers are
  * assumed to pass values with no more than 18 decimal digits (in particular
  * {@code Long.MIN_VALUE} cannot be negated and is not supported) — TODO
  * confirm against the call sites.
  *
  * @param value the unscaled value
  * @param scale the number of decimal digits to the right of the point
  * @return the formatted decimal string
  */
 static String toDecimalScaledString(long value, int scale) {
    boolean negative = value < 0;
    if (negative) {
        value = -value;
    }
    // Zero gets dedicated handling: a short plain form for scales 0..6,
    // otherwise "0E<exponent>" with '+' for negative scales.
    if (value == 0) {
        if (scale >= 0 && scale <= 6) {
            StringBuilder zero = new StringBuilder(2 + scale);
            zero.append('0');
            if (scale > 0) {
                zero.append('.');
                for (int i = 0; i < scale; i++) {
                    zero.append('0');
                }
            }
            return zero.toString();
        }
        String magnitude = (scale == Integer.MIN_VALUE) ? "2147483648" : Integer.toString(-scale);
        return (scale < 0 ? "0E+" : "0E") + magnitude;
    }
    // Emit the decimal digits of |value| into the tail of a fixed 18-digit buffer
    // (one spare slot at the end for a later decimal-point insertion).
    final int bufLen = 18;
    char[] digits = new char[bufLen + 1];
    int pos = bufLen;
    long rest = value;
    do {
        long quotient = rest / 10;
        digits[--pos] = (char) ('0' + (rest - quotient * 10));
        rest = quotient;
    } while (rest != 0);
    // Adjusted exponent of the most significant digit (as in BigDecimal.toString).
    long exponent = (long) bufLen - (long) pos - scale - 1L;
    if (scale == 0) {
        // Pure integer: just the digits, with an optional leading sign.
        if (negative) {
            digits[--pos] = '-';
        }
        return new String(digits, pos, bufLen - pos);
    }
    if (scale > 0 && exponent >= -6) {
        if (exponent >= 0) {
            // Decimal point falls inside the digit run: shift the fractional
            // digits right by one and drop the point in after the integer part.
            int dotAt = pos + (int) exponent;
            System.arraycopy(digits, dotAt, digits, dotAt + 1, bufLen - dotAt);
            digits[dotAt + 1] = '.';
            if (negative) {
                digits[--pos] = '-';
            }
            return new String(digits, pos, bufLen - pos + 1);
        }
        // Magnitude below one: prefix "0." plus |exponent| - 1 padding zeros.
        for (long pad = -exponent - 1; pad > 0; pad--) {
            digits[--pos] = '0';
        }
        digits[--pos] = '.';
        digits[--pos] = '0';
        if (negative) {
            digits[--pos] = '-';
        }
        return new String(digits, pos, bufLen - pos);
    }
    // Scientific notation: "d.ddd...E<exp>", '+' only for positive exponents.
    int digitCount = bufLen - pos;
    StringBuilder sci = new StringBuilder(15 + digitCount);
    if (negative) {
        sci.append('-');
    }
    if (digitCount > 1) {
        sci.append(digits[pos]);
        sci.append('.');
        sci.append(digits, pos + 1, digitCount - 1);
    } else {
        sci.append(digits, pos, digitCount);
    }
    sci.append('E');
    if (exponent > 0) {
        sci.append('+');
    }
    sci.append(exponent);
    return sci.toString();
}
252871.9535129gwt
 /**
  * Returns the decimal string form of {@code value * 10^(-scale)}, following
  * the {@code java.math.BigDecimal#toString()} formatting rules: plain decimal
  * notation when {@code scale == 0}, or when {@code scale > 0} and the adjusted
  * exponent is at least -6; scientific notation (with an explicit '+' on
  * positive exponents) otherwise.
  *
  * <p>NOTE(review): the digit buffer holds at most 18 digits, so callers are
  * assumed to pass values with no more than 18 decimal digits (in particular
  * {@code Long.MIN_VALUE} cannot be negated here) — TODO confirm against the
  * call sites.
  *
  * @param value the unscaled value
  * @param scale the number of decimal digits to the right of the point
  * @return the formatted decimal string
  */
 static String toDecimalScaledString(long value, int scale) {
    int resLengthInChars;
    int currentChar;
    char[] result;
    // Work on |value|; re-apply the sign when assembling the output.
    boolean negNumber = value < 0;
    if (negNumber) {
        value = -value;
    }
    // Zero is special-cased: common scales return interned constants, anything
    // else builds "0E<exponent>" (with '+' for negative scales).
    if (value == 0) {
        switch(scale) {
            case 0:
                return "0";
            case 1:
                return "0.0";
            case 2:
                return "0.00";
            case 3:
                return "0.000";
            case 4:
                return "0.0000";
            case 5:
                return "0.00000";
            case 6:
                return "0.000000";
            default:
                StringBuilder result1 = new StringBuilder();
                if (scale < 0) {
                    result1.append("0E+");
                } else {
                    result1.append("0E");
                }
                // Integer.MIN_VALUE cannot be negated, so its magnitude is spelled out.
                result1.append((scale == Integer.MIN_VALUE) ? "2147483648" : Integer.toString(-scale));
                return result1.toString();
        }
    }
    // Write the decimal digits of |value| into the tail of the buffer;
    // one spare slot at the end allows a later decimal-point insertion.
    resLengthInChars = 18;
    result = new char[resLengthInChars + 1];
    currentChar = resLengthInChars;
    long v = value;
    do {
        long prev = v;
        v /= 10;
        // 0x0030 is '0'; prev - v * 10 is the least significant digit.
        result[--currentChar] = (char) (0x0030 + (prev - v * 10));
    } while (v != 0);
    // Adjusted exponent of the most significant digit (as in BigDecimal.toString).
    long exponent = (long) resLengthInChars - (long) currentChar - scale - 1L;
    if (scale == 0) {
        // Pure integer: just the digits, with an optional leading sign.
        if (negNumber) {
            result[--currentChar] = '-';
        }
        return new String(result, currentChar, resLengthInChars - currentChar);
    }
    if (scale > 0 && exponent >= -6) {
        if (exponent >= 0) {
            // Decimal point falls inside the digit run: shift the fractional
            // digits one slot right and insert '.' after the integer part.
            int insertPoint = currentChar + (int) exponent;
            for (int j = resLengthInChars - 1; j >= insertPoint; j--) {
                result[j + 1] = result[j];
            }
            result[++insertPoint] = '.';
            if (negNumber) {
                result[--currentChar] = '-';
            }
            return new String(result, currentChar, resLengthInChars - currentChar + 1);
        }
        // Magnitude below one: prepend |exponent| - 1 zeros, then "0.".
        for (int j = 2; j < -exponent + 1; j++) {
            result[--currentChar] = '0';
        }
        result[--currentChar] = '.';
        result[--currentChar] = '0';
        if (negNumber) {
            result[--currentChar] = '-';
        }
        return new String(result, currentChar, resLengthInChars - currentChar);
    }
    // Scientific notation: "d.ddd...E<exp>", '+' only for positive exponents.
    int startPoint = currentChar + 1;
    int endPoint = resLengthInChars;
    StringBuilder result1 = new StringBuilder(16 + endPoint - startPoint);
    if (negNumber) {
        result1.append('-');
    }
    if (endPoint - startPoint >= 1) {
        // More than one digit: first digit, '.', then the rest.
        result1.append(result[currentChar]);
        result1.append('.');
        result1.append(result, currentChar + 1, resLengthInChars - currentChar - 1);
    } else {
        result1.append(result, currentChar, resLengthInChars - currentChar);
    }
    result1.append('E');
    if (exponent > 0) {
        result1.append('+');
    }
    result1.append(Long.toString(exponent));
    return result1.toString();
}
253267.6615152hadoop
/**
 * Computes a 32-bit hash of the first {@code nbytes} bytes of {@code key},
 * seeded with {@code initval}. The structure (three running words a/b/c,
 * 12-byte mixing rounds, a fall-through tail switch, and a final mix) follows
 * Bob Jenkins' lookup3-style hashing; all arithmetic is done in {@code long}
 * and masked with {@code INT_MASK} to emulate unsigned 32-bit operations.
 *
 * <p>NOTE(review): {@code BYTE_MASK}, {@code INT_MASK} and {@code rot} are
 * defined on the enclosing class and are not visible here — presumably
 * 0xff, 0xffffffffL and a 32-bit left rotate; confirm against the class.
 *
 * @param key the byte array to hash (must hold at least {@code nbytes} bytes)
 * @param nbytes number of bytes of {@code key} to consume
 * @param initval seed value mixed into the initial state
 * @return the 32-bit hash value
 */
public int hash(byte[] key, int nbytes, int initval) {
    int length = nbytes;
    long a, b, c;
    // All three words start from the same seeded constant (lookup3's golden ratio).
    a = b = c = (0x00000000deadbeefL + length + initval) & INT_MASK;
    int offset = 0;
    // Main loop: consume 12 bytes per round (4 little-endian bytes into each
    // of a, b, c), then run the mixing schedule.
    for (; length > 12; offset += 12, length -= 12) {
        a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK;
        a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
        a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
        a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
        b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK;
        b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
        b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
        b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
        c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK;
        c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
        c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
        c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
        // mix(a, b, c): the rotate/subtract/xor schedule from lookup3.
        a = (a - c) & INT_MASK;
        a ^= rot(c, 4);
        c = (c + b) & INT_MASK;
        b = (b - a) & INT_MASK;
        b ^= rot(a, 6);
        a = (a + c) & INT_MASK;
        c = (c - b) & INT_MASK;
        c ^= rot(b, 8);
        b = (b + a) & INT_MASK;
        a = (a - c) & INT_MASK;
        a ^= rot(c, 16);
        c = (c + b) & INT_MASK;
        b = (b - a) & INT_MASK;
        b ^= rot(a, 19);
        a = (a + c) & INT_MASK;
        c = (c - b) & INT_MASK;
        c ^= rot(b, 4);
        b = (b + a) & INT_MASK;
    }
    // Tail: absorb the final 0..12 bytes. The fall-through is intentional —
    // entering at case N absorbs all remaining N bytes.
    switch(length) {
        case 12:
            c = (c + (((key[offset + 11] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
            // fall through
        case 11:
            c = (c + (((key[offset + 10] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
            // fall through
        case 10:
            c = (c + (((key[offset + 9] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
            // fall through
        case 9:
            c = (c + (key[offset + 8] & BYTE_MASK)) & INT_MASK;
            // fall through
        case 8:
            b = (b + (((key[offset + 7] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
            // fall through
        case 7:
            b = (b + (((key[offset + 6] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
            // fall through
        case 6:
            b = (b + (((key[offset + 5] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
            // fall through
        case 5:
            b = (b + (key[offset + 4] & BYTE_MASK)) & INT_MASK;
            // fall through
        case 4:
            a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK;
            // fall through
        case 3:
            a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK;
            // fall through
        case 2:
            a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK;
            // fall through
        case 1:
            a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK;
            break;
        case 0:
            // No tail bytes: skip the final mix entirely, per lookup3.
            return (int) (c & INT_MASK);
    }
    // final(a, b, c): the closing avalanche schedule.
    c ^= b;
    c = (c - rot(b, 14)) & INT_MASK;
    a ^= c;
    a = (a - rot(c, 11)) & INT_MASK;
    b ^= a;
    b = (b - rot(a, 25)) & INT_MASK;
    c ^= b;
    c = (c - rot(b, 16)) & INT_MASK;
    a ^= c;
    a = (a - rot(c, 4)) & INT_MASK;
    b ^= a;
    b = (b - rot(a, 14)) & INT_MASK;
    c ^= b;
    c = (c - rot(b, 24)) & INT_MASK;
    return (int) (c & INT_MASK);
}
253042.7715165hadoop
/**
 * End-to-end test of HDFS block access tokens on the read path: verifies that
 * reads succeed with a valid token, fail once the token expires or carries the
 * wrong block id / access mode, and that open streams transparently re-fetch
 * tokens across datanode restarts and namenode shutdowns. Exercises three
 * independent input streams so different read paths (checkFile1 vs checkFile2,
 * seek vs seekToNewSource) are all covered.
 *
 * @param conf cluster configuration used for the side DFSClient and tryRead calls
 * @param cluster the running mini cluster under test
 * @param isStriped whether the file is erasure-coded (striped reads use seek(0)
 *        instead of seekToNewSource(0), which is not applicable to striped blocks)
 */
protected void doTestRead(Configuration conf, MiniDFSCluster cluster, boolean isStriped) throws Exception {
    final int numDataNodes = cluster.getDataNodes().size();
    final NameNode nn = cluster.getNameNode();
    final NamenodeProtocols nnProto = nn.getRpcServer();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // Short 1-second token lifetime so expiry can be observed quickly below.
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToRead = new Path(FILE_TO_READ);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    createFile(fs, fileToRead, expected);
    // Three independent streams over the same file; each is re-verified after
    // every cluster event for the rest of the test.
    FSDataInputStream in1 = fs.open(fileToRead);
    assertTrue(checkFile1(in1, expected));
    FSDataInputStream in2 = fs.open(fileToRead);
    assertTrue(checkFile1(in2, expected));
    FSDataInputStream in3 = fs.open(fileToRead);
    assertTrue(checkFile2(in3, expected));
    // Smoke-test that a DFSClient can be constructed against this cluster with
    // this conf; it is closed immediately and not used further.
    DFSClient client = null;
    try {
        client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    } finally {
        if (client != null)
            client.close();
    }
    // Grab the first located block and verify a direct read with its fresh token.
    List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
    LocatedBlock lblock = locatedBlocks.get(0);
    assertFalse(isBlockTokenExpired(lblock));
    tryRead(conf, lblock, true);
    // Poll until the 1-second token lifetime elapses; expired token must fail.
    while (!isBlockTokenExpired(lblock)) {
        try {
            Thread.sleep(10);
        } catch (InterruptedException ignored) {
        }
    }
    assertTrue(isBlockTokenExpired(lblock));
    tryRead(conf, lblock, false);
    // A freshly minted READ token makes the read succeed again.
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    tryRead(conf, lblock, true);
    // A token minted for a different block id must be rejected.
    long rightId = lblock.getBlock().getBlockId();
    long wrongId = rightId + 1;
    lblock.getBlock().setBlockId(wrongId);
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    lblock.getBlock().setBlockId(rightId);
    tryRead(conf, lblock, false);
    // A WRITE-mode token must not authorize a read.
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
    tryRead(conf, lblock, false);
    // Restore a long (10-minute) lifetime for the remainder of the test.
    SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
    // The tokens cached in the open streams are all expired by now, yet reads
    // must still succeed because the streams re-fetch tokens on demand.
    List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
        assertTrue(isBlockTokenExpired(blk));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
        assertTrue(isBlockTokenExpired(blk));
    }
    if (isStriped) {
        in2.seek(0);
    } else {
        assertTrue(in2.seekToNewSource(0));
    }
    assertTrue(checkFile1(in2, expected));
    List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
        assertTrue(isBlockTokenExpired(blk));
    }
    assertTrue(checkFile2(in3, expected));
    // Restart datanodes (keeping ports), then shut the namenode down: cached
    // (now fresh) tokens should keep all three streams readable without it.
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
        assertFalse(isBlockTokenExpired(blk));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
        assertFalse(isBlockTokenExpired(blk));
    }
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
        assertFalse(isBlockTokenExpired(blk));
    }
    assertTrue(checkFile2(in3, expected));
    // Bounce the namenode and shut it down again: reads still served from
    // valid cached tokens.
    cluster.restartNameNode(0);
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    assertTrue(checkFile2(in3, expected));
    // With the namenode up, restart datanodes so their keys roll; then with the
    // namenode down again, cached tokens no longer match and reads must fail.
    cluster.restartNameNode(0);
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertFalse(checkFile1(in1, expected));
    assertFalse(checkFile2(in3, expected));
    // Once the namenode is back, streams can fetch new tokens and recover.
    cluster.restartNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    assertTrue(checkFile2(in3, expected));
    // Finally restart datanodes on new ports: streams must re-resolve block
    // locations and keep working.
    assertTrue(cluster.restartDataNodes(false));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    assertTrue(checkFile2(in3, expected));
}
254360.414170hadoop
/**
 * Exercises NameNode, DataNode and block-pool rollback across a matrix of
 * scenarios, for both one and two configured storage directories.
 * Each scenario builds the required "current"/"previous" storage state,
 * runs the rollback (or expects it to fail), verifies the result, and then
 * wipes the directories so the next scenario starts clean.
 */
public void testRollback() throws Exception {
    File[] baseDirs;
    UpgradeUtilities.initialize();
    StorageInfo storageInfo = null;
    // Repeat every scenario with 1 and then 2 storage directories.
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
        conf = new HdfsConfiguration();
        // Disable the periodic block scanner for the duration of the test.
        conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
        conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
        String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
        String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
        // Scenario: rollback succeeds when both current and previous exist.
        log("Normal NameNode rollback", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        checkResult(NAME_NODE, nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // Scenario: DataNode rollback with a matching rolled-back NameNode.
        log("Normal DataNode rollback", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
        checkResult(DATA_NODE, dataNodeDirs);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Scenario: rollback of block-pool storage (per-pool current/previous
        // under the DataNode dirs) with a version file one layout version older.
        log("Normal BlockPool rollback", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current", UpgradeUtilities.getCurrentBlockPoolID(cluster));
        UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous", UpgradeUtilities.getCurrentBlockPoolID(cluster));
        // Layout version - 1: an older-but-supported version, so rollback works.
        storageInfo = new StorageInfo(DataNodeLayoutVersion.getCurrentLayoutVersion() - 1, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
        File[] dataCurrentDirs = new File[dataNodeDirs.length];
        for (int i = 0; i < dataNodeDirs.length; i++) {
            dataCurrentDirs[i] = new File((new Path(dataNodeDirs[i] + "/current")).toString());
        }
        UpgradeUtilities.createDataNodeVersionFile(dataCurrentDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
        cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
        assertTrue(cluster.isDataNodeUp());
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Scenario: rollback must fail when no "previous" directory exists.
        log("NameNode rollback without existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        startNameNodeShouldFail("None of the storage directories contain previous fs state");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // Scenario: a DataNode started with -rollback but no previous dir
        // simply comes up (rollback is a no-op for it).
        log("DataNode rollback without existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.UPGRADE).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Scenario: previous dir advertises an impossible future layout
        // version (Integer.MIN_VALUE); block-pool rollback must fail.
        log("DataNode rollback with future stored layout version in previous", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        storageInfo = new StorageInfo(Integer.MIN_VALUE, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
        startBlockPoolShouldFail(StartupOption.ROLLBACK, cluster.getNamesystem().getBlockPoolId());
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Scenario: previous dir has a newer fs creation time (Long.MAX_VALUE);
        // block-pool rollback must fail.
        log("DataNode rollback with newer fsscTime in previous", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        NameNode.doRollback(conf, false);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        storageInfo = new StorageInfo(DataNodeLayoutVersion.getCurrentLayoutVersion(), UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE, NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
        startBlockPoolShouldFail(StartupOption.ROLLBACK, cluster.getNamesystem().getBlockPoolId());
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Scenario: previous dir missing its edit logs -> rollback must fail.
        log("NameNode rollback with no edits file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        deleteMatchingFiles(baseDirs, "edits.*");
        startNameNodeShouldFail("Gap in transactions");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // Scenario: previous dir missing its fsimage -> rollback must fail.
        log("NameNode rollback with no image file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        deleteMatchingFiles(baseDirs, "fsimage_.*");
        startNameNodeShouldFail("No valid image files found");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // Scenario: the layoutVersion key in VERSION is clobbered with junk
        // of the same byte length -> rollback must fail.
        log("NameNode rollback with corrupt version file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        for (File f : baseDirs) {
            UpgradeUtilities.corruptFile(new File(f, "VERSION"), "layoutVersion".getBytes(StandardCharsets.UTF_8), "xxxxxxxxxxxxx".getBytes(StandardCharsets.UTF_8));
        }
        startNameNodeShouldFail("file VERSION has layoutVersion missing");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // Scenario: previous dir claims ancient layout version 1 -> this
        // software version cannot roll back to it, so startup must fail.
        log("NameNode rollback with old layout version in previous", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        storageInfo = new StorageInfo(1, UpgradeUtilities.getCurrentNamespaceID(null), UpgradeUtilities.getCurrentClusterID(null), UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
        UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(null));
        startNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    }
}
255076.085165hadoop
/**
 * End-to-end test of space (disk) quota enforcement: sets nested space
 * quotas, then verifies consumption accounting and quota violations across
 * file creation, deletion, rename, append, and replication changes.
 * Each assertion depends on the filesystem state built by the steps before
 * it, so the statement order is significant throughout.
 */
public void testSpaceCommands() throws Exception {
    final Path parent = new Path(PathUtils.getTestDir(getClass()).getPath(), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    int fileLen = 1024;
    short replication = 3;
    // Space consumed by one file = logical length * replication factor.
    int fileSpace = fileLen * replication;
    assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/qdir1/qdir20/nqdir30")));
    // Nested quotas: qdir1 (4x) contains qdir20 (6x) and qdir21 (2x).
    final Path quotaDir1 = new Path(parent, "nqdir0/qdir1");
    dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace);
    ContentSummary c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getSpaceQuota(), 4 * fileSpace);
    final Path quotaDir20 = new Path(parent, "nqdir0/qdir1/qdir20");
    dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
    c = dfs.getContentSummary(quotaDir20);
    compareQuotaUsage(c, dfs, quotaDir20);
    assertEquals(c.getSpaceQuota(), 6 * fileSpace);
    final Path quotaDir21 = new Path(parent, "nqdir0/qdir1/qdir21");
    assertTrue(dfs.mkdirs(quotaDir21));
    dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceQuota(), 2 * fileSpace);
    // One file under quotaDir21 fits inside its 2x quota.
    Path tempPath = new Path(quotaDir21, "nqdir32");
    assertTrue(dfs.mkdirs(tempPath));
    DFSTestUtil.createFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    // A 2x-length file would push quotaDir21 to 3x space -> must be rejected.
    boolean hasException = false;
    try {
        DFSTestUtil.createFile(dfs, new Path(quotaDir21, "nqdir33/file2"), 2 * fileLen, replication, 0);
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // Deleting the partially-created nqdir33 restores the pre-failure usage.
    assertTrue(dfs.delete(new Path(quotaDir21, "nqdir33"), true));
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    assertEquals(c.getSpaceQuota(), 2 * fileSpace);
    c = dfs.getContentSummary(quotaDir20);
    compareQuotaUsage(c, dfs, quotaDir20);
    assertEquals(c.getSpaceConsumed(), 0);
    // Rename moves file1's usage from quotaDir21 into quotaDir20; the common
    // ancestor quotaDir1 still accounts for it.
    Path dstPath = new Path(quotaDir20, "nqdir30");
    Path srcPath = new Path(quotaDir21, "nqdir32");
    assertTrue(dfs.rename(srcPath, dstPath));
    c = dfs.getContentSummary(quotaDir20);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), 0);
    // Add a 2x-length file under dstPath; quotaDir20 now holds 3x space.
    final Path file2 = new Path(dstPath, "fileDir/file2");
    int file2Len = 2 * fileLen;
    DFSTestUtil.createFile(dfs, file2, file2Len, replication, 0);
    c = dfs.getContentSummary(quotaDir20);
    assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), 0);
    // Renaming 3x of space back under quotaDir21 (quota 2x) must fail and
    // leave both trees untouched.
    hasException = false;
    try {
        assertFalse(dfs.rename(dstPath, srcPath));
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    assertFalse(dfs.exists(srcPath));
    assertTrue(dfs.exists(dstPath));
    c = dfs.getContentSummary(quotaDir20);
    assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), 0);
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getSpaceQuota(), 4 * fileSpace);
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
    // Appending fileLen bytes fits exactly into quotaDir1's 4x quota.
    OutputStream out = dfs.append(file2);
    out.write(new byte[fileLen]);
    out.close();
    file2Len += fileLen;
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
    // Raise quotaDir1 to 5x, then append more than fileLen: the write must
    // hit the quota; the stream is closed via IOUtils after the failure.
    dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 5 * fileSpace);
    out = dfs.append(file2);
    hasException = false;
    try {
        out.write(new byte[fileLen + 1024]);
        out.flush();
        out.close();
    } catch (DSQuotaExceededException e) {
        hasException = true;
        IOUtils.closeStream(out);
    }
    assertTrue(hasException);
    file2Len += fileLen;
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
    // Lowering replication by one frees file2Len bytes of accounted space.
    dfs.setReplication(file2, (short) (replication - 1));
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
    // Raising replication above the quota must fail and leave usage unchanged.
    hasException = false;
    try {
        dfs.setReplication(file2, (short) (replication + 1));
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
    // With both quotas raised to 10x, the same replication bump succeeds.
    dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
    dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
    dfs.setReplication(file2, (short) (replication + 1));
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Size);
    final Path quotaDir2053 = new Path(parent, "hdfs-2053");
    assertTrue(dfs.mkdirs(quotaDir2053));
    final Path quotaDir2053_A = new Path(quotaDir2053, "A");
    assertTrue(dfs.mkdirs(quotaDir2053_A));
    final Path quotaDir2053_B = new Path(quotaDir2053, "B");
    assertTrue(dfs.mkdirs(quotaDir2053_B));
    final Path quotaDir2053_C = new Path(quotaDir2053, "C");
    assertTrue(dfs.mkdirs(quotaDir2053_C));
    int sizeFactorA = 1;
    int sizeFactorB = 2;
    int sizeFactorC = 4;
    // Only C has a quota; A and B are unlimited siblings (regression test
    // for the HDFS-2053 accounting scenario named by the directory).
    dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET, (sizeFactorC + 1) * fileSpace);
    c = dfs.getContentSummary(quotaDir2053_C);
    compareQuotaUsage(c, dfs, quotaDir2053_C);
    assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
    DFSTestUtil.createFile(dfs, new Path(quotaDir2053_A, "fileA"), sizeFactorA * fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir2053_A);
    compareQuotaUsage(c, dfs, quotaDir2053_A);
    assertEquals(c.getSpaceConsumed(), sizeFactorA * fileSpace);
    DFSTestUtil.createFile(dfs, new Path(quotaDir2053_B, "fileB"), sizeFactorB * fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir2053_B);
    compareQuotaUsage(c, dfs, quotaDir2053_B);
    assertEquals(c.getSpaceConsumed(), sizeFactorB * fileSpace);
    DFSTestUtil.createFile(dfs, new Path(quotaDir2053_C, "fileC"), sizeFactorC * fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir2053_C);
    compareQuotaUsage(c, dfs, quotaDir2053_C);
    assertEquals(c.getSpaceConsumed(), sizeFactorC * fileSpace);
    // The parent aggregates all three children's usage.
    c = dfs.getContentSummary(quotaDir2053);
    compareQuotaUsage(c, dfs, quotaDir2053);
    assertEquals(c.getSpaceConsumed(), (sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
}
253689.9118137hadoop
/**
 * Installs Mockito stubs on {@code mockNn} that simulate a tiny in-memory
 * namespace: paths map to either "FILE" or "DIRECTORY" in a sorted map, and
 * the stubbed NameNode RPCs (getListing, getFileInfo, create, addBlock,
 * complete, mkdirs, getContentSummary, ...) read and write that map.
 *
 * NOTE(review): several stubs use the split form "call the mock with
 * matchers, then when(result).thenAnswer(...)". Mockito records the argument
 * matchers at the mock-call site, so each mock call must be immediately
 * followed by its when(...) — do not reorder these pairs.
 */
public void addFileSystemMock() throws IOException {
    // Backing "filesystem": sorted path -> type ("FILE" / "DIRECTORY").
    // Concurrent because stub answers may run on caller threads.
    final SortedMap<String, String> fs = new ConcurrentSkipListMap<String, String>();
    DirectoryListing l = mockNn.getListing(anyString(), any(), anyBoolean());
    when(l).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        LOG.info("{} getListing({})", nsId, src);
        if (fs.get(src) == null) {
            throw new FileNotFoundException("File does not exist " + src);
        }
        if (!src.endsWith("/")) {
            src += "/";
        }
        // subMap(src, src + MAX_VALUE) selects every path under this prefix.
        Map<String, String> files = fs.subMap(src, src + Character.MAX_VALUE);
        List<HdfsFileStatus> list = new ArrayList<>();
        for (String file : files.keySet()) {
            // Keep only direct children: no further '/' after the prefix.
            if (file.substring(src.length()).indexOf('/') < 0) {
                HdfsFileStatus fileStatus = getMockHdfsFileStatus(file, fs.get(file));
                list.add(fileStatus);
            }
        }
        HdfsFileStatus[] array = list.toArray(new HdfsFileStatus[list.size()]);
        return new DirectoryListing(array, 0);
    });
    when(mockNn.getFileInfo(anyString())).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        LOG.info("{} getFileInfo({})", nsId, src);
        // Returns whatever status the map yields; fs.get may be null here.
        return getMockHdfsFileStatus(src, fs.get(src));
    });
    HdfsFileStatus c = mockNn.create(anyString(), any(), anyString(), any(), anyBoolean(), anyShort(), anyLong(), any(), any(), any());
    when(c).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        LOG.info("{} create({})", nsId, src);
        // Argument 4 of create(...) is the createParent flag.
        boolean createParent = (boolean) invocation.getArgument(4);
        if (createParent) {
            // Materialize every missing ancestor directory up to the root.
            Path path = new Path(src).getParent();
            while (!path.isRoot()) {
                LOG.info("{} create parent {}", nsId, path);
                fs.put(path.toString(), "DIRECTORY");
                path = path.getParent();
            }
        }
        fs.put(src, "FILE");
        return getMockHdfsFileStatus(src, "FILE");
    });
    LocatedBlocks b = mockNn.getBlockLocations(anyString(), anyLong(), anyLong());
    when(b).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        LOG.info("{} getBlockLocations({})", nsId, src);
        if (!fs.containsKey(src)) {
            LOG.error("{} cannot find {} for getBlockLocations", nsId, src);
            throw new FileNotFoundException("File does not exist " + src);
        }
        return mock(LocatedBlocks.class);
    });
    boolean f = mockNn.complete(anyString(), anyString(), any(), anyLong());
    when(f).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        if (!fs.containsKey(src)) {
            LOG.error("{} cannot find {} for complete", nsId, src);
            throw new FileNotFoundException("File does not exist " + src);
        }
        return true;
    });
    LocatedBlock a = mockNn.addBlock(anyString(), anyString(), any(), any(), anyLong(), any(), any());
    when(a).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        if (!fs.containsKey(src)) {
            LOG.error("{} cannot find {} for addBlock", nsId, src);
            throw new FileNotFoundException("File does not exist " + src);
        }
        return getMockLocatedBlock(nsId);
    });
    boolean m = mockNn.mkdirs(anyString(), any(), anyBoolean());
    when(m).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        LOG.info("{} mkdirs({})", nsId, src);
        // Argument 2 of mkdirs(...) is the createParent flag.
        boolean createParent = (boolean) invocation.getArgument(2);
        if (createParent) {
            Path path = new Path(src).getParent();
            while (!path.isRoot()) {
                LOG.info("{} mkdir parent {}", nsId, path);
                fs.put(path.toString(), "DIRECTORY");
                path = path.getParent();
            }
        }
        fs.put(src, "DIRECTORY");
        return true;
    });
    when(mockNn.getServerDefaults()).thenAnswer(invocation -> {
        LOG.info("{} getServerDefaults", nsId);
        FsServerDefaults defaults = mock(FsServerDefaults.class);
        when(defaults.getChecksumType()).thenReturn(Type.valueOf(DataChecksum.CHECKSUM_CRC32));
        // Key provider URI doubles as a namespace marker in these tests.
        when(defaults.getKeyProviderUri()).thenReturn(nsId);
        return defaults;
    });
    when(mockNn.getContentSummary(anyString())).thenAnswer(invocation -> {
        String src = getSrc(invocation);
        LOG.info("{} getContentSummary({})", nsId, src);
        if (fs.get(src) == null) {
            throw new FileNotFoundException("File does not exist " + src);
        }
        if (!src.endsWith("/")) {
            src += "/";
        }
        Map<String, String> files = fs.subMap(src, src + Character.MAX_VALUE);
        int numFiles = 0;
        int numDirs = 0;
        int length = 0;
        // Count only direct children; every file contributes a fixed 100 bytes.
        for (Entry<String, String> entry : files.entrySet()) {
            String file = entry.getKey();
            if (file.substring(src.length()).indexOf('/') < 0) {
                String type = entry.getValue();
                if ("DIRECTORY".equals(type)) {
                    numDirs++;
                } else if ("FILE".equals(type)) {
                    numFiles++;
                    length += 100;
                }
            }
        }
        return new ContentSummary.Builder().fileCount(numFiles).directoryCount(numDirs).length(length).erasureCodingPolicy("").build();
    });
}
254612.5313138hadoop
/**
 * Renders the job-overview page: job metadata, diagnostics, per-phase
 * average times, ACLs, the ApplicationMaster attempts table, and the
 * task/attempt summary tables. Emits an error paragraph and returns early
 * when the job id is missing, unknown, or too large to load.
 *
 * @param html the Hamlet block to render into
 */
protected void render(Block html) {
    String jid = $(JOB_ID);
    if (jid.isEmpty()) {
        html.p().__("Sorry, can't do anything without a JobID.").__();
        return;
    }
    JobId jobID = MRApps.toJobID(jid);
    Job j = appContext.getJob(jobID);
    if (j == null) {
        html.p().__("Sorry, ", jid, " not found.").__();
        return;
    }
    // Oversized jobs are kept unparsed; point the user at the CLI instead.
    if (j instanceof UnparsedJob) {
        final int taskCount = j.getTotalMaps() + j.getTotalReduces();
        UnparsedJob oversizedJob = (UnparsedJob) j;
        html.p().__("The job has a total of " + taskCount + " tasks. ").__("Any job larger than " + oversizedJob.getMaxTasksAllowed() + " will not be loaded.").__();
        html.p().__("You can either use the CLI tool: 'mapred job -history'" + " to view large jobs or adjust the property " + JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX + ".").__();
        return;
    }
    List<AMInfo> amInfos = j.getAMInfos();
    JobInfo job = new JobInfo(j);
    ResponseInfo infoBlock = info("Job Overview").__("Job Name:", job.getName()).__("User Name:", job.getUserName()).__("Queue:", job.getQueueName()).__("State:", job.getState()).__("Uberized:", job.isUber()).__("Submitted:", new Date(job.getSubmitTime())).__("Started:", job.getStartTimeStr()).__("Finished:", new Date(job.getFinishTime())).__("Elapsed:", StringUtils.formatTime(Times.elapsed(job.getStartTime(), job.getFinishTime(), false)));
    String amString = amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
    List<String> diagnostics = j.getDiagnostics();
    if (diagnostics != null && !diagnostics.isEmpty()) {
        // StringBuilder: local, single-threaded — no need for the
        // synchronized StringBuffer the original used.
        StringBuilder b = new StringBuilder();
        for (String diag : diagnostics) {
            // addTaskLinks turns task ids in the diagnostic text into links.
            b.append(addTaskLinks(diag));
        }
        infoBlock._r("Diagnostics:", b.toString());
    }
    if (job.getNumMaps() > 0) {
        infoBlock.__("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
    }
    if (job.getNumReduces() > 0) {
        infoBlock.__("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
        infoBlock.__("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
        infoBlock.__("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
    }
    for (ConfEntryInfo entry : job.getAcls()) {
        infoBlock.__("ACL " + entry.getName() + ":", entry.getValue());
    }
    // ApplicationMaster attempts table with alternating row styling.
    DIV<Hamlet> div = html.__(InfoBlock.class).div(_INFO_WRAP);
    TABLE<DIV<Hamlet>> table = div.table("#job");
    table.tr().th(amString).__().tr().th(_TH, "Attempt Number").th(_TH, "Start Time").th(_TH, "Node").th(_TH, "Logs").__();
    boolean odd = false;
    for (AMInfo amInfo : amInfos) {
        AMAttemptInfo attempt = new AMAttemptInfo(amInfo, job.getId(), job.getUserName(), "", "");
        table.tr((odd = !odd) ? _ODD : _EVEN).td(String.valueOf(attempt.getAttemptId())).td(new Date(attempt.getStartTime()).toString()).td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()), attempt.getNodeHttpAddress()).__().td().a(".logslink", url(attempt.getLogsLink()), "logs").__().__();
    }
    table.__();
    div.__();
    // Task totals and attempt outcome tables. (Fixed: the counts were
    // wrapped in String.valueOf twice; once is enough.)
    html.div(_INFO_WRAP).table("#job").tr().th(_TH, "Task Type").th(_TH, "Total").th(_TH, "Complete").__().tr(_ODD).th().a(url("tasks", jid, "m"), "Map").__().td(String.valueOf(job.getMapsTotal())).td(String.valueOf(job.getMapsCompleted())).__().tr(_EVEN).th().a(url("tasks", jid, "r"), "Reduce").__().td(String.valueOf(job.getReducesTotal())).td(String.valueOf(job.getReducesCompleted())).__().__().table("#job").tr().th(_TH, "Attempt Type").th(_TH, "Failed").th(_TH, "Killed").th(_TH, "Successful").__().tr(_ODD).th("Maps").td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), String.valueOf(job.getFailedMapAttempts())).__().td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), String.valueOf(job.getKilledMapAttempts())).__().td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(job.getSuccessfulMapAttempts())).__().__().tr(_EVEN).th("Reduces").td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), String.valueOf(job.getFailedReduceAttempts())).__().td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), String.valueOf(job.getKilledReduceAttempts())).__().td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(job.getSuccessfulReduceAttempts())).__().__().__().__();
}
255154.7617128hadoop
/**
 * Renders the tasks table for the current job: a Hamlet table shell
 * (headers and a search-input footer) plus a JavaScript array literal,
 * {@code tasksTableData}, holding one row per task for client-side
 * population. Reduce-type tables get extra shuffle/merge columns; the
 * Hamlet fluent chains are order-sensitive, so statement order matters.
 *
 * @param html the Hamlet block to render into
 */
protected void render(Block html) {
    if (app.getJob() == null) {
        html.h2($(TITLE));
        return;
    }
    // Optional task-type filter ("m"/"r") taken from the request.
    TaskType type = null;
    String symbol = $(TASK_TYPE);
    if (!symbol.isEmpty()) {
        type = MRApps.taskType(symbol);
    }
    THEAD<TABLE<Hamlet>> thead;
    if (type != null)
        thead = html.table("#" + app.getJob().getID() + type).$class("dt-tasks").thead();
    else
        thead = html.table("#tasks").thead();
    // Reduce tables carry 5 extra attempt columns (shuffle/merge timings).
    int attemptColSpan = type == TaskType.REDUCE ? 8 : 3;
    thead.tr().th().$colspan(5).$class("ui-state-default").__("Task").__().th().$colspan(attemptColSpan).$class("ui-state-default").__("Successful Attempt").__().__();
    TR<THEAD<TABLE<Hamlet>>> theadRow = thead.tr().th("Name").th("State").th("Start Time").th("Finish Time").th("Elapsed Time").th("Start Time");
    if (type == TaskType.REDUCE) {
        theadRow.th("Shuffle Finish Time");
        theadRow.th("Merge Finish Time");
    }
    theadRow.th("Finish Time");
    if (type == TaskType.REDUCE) {
        theadRow.th("Elapsed Time Shuffle");
        theadRow.th("Elapsed Time Merge");
        theadRow.th("Elapsed Time Reduce");
    }
    theadRow.th("Elapsed Time");
    TBODY<TABLE<Hamlet>> tbody = theadRow.__().__().tbody();
    // Build the rows as a JS array-of-arrays literal consumed by the page's
    // DataTable script; columns must line up with the headers above.
    StringBuilder tasksTableData = new StringBuilder("[\n");
    for (Task task : app.getJob().getTasks().values()) {
        if (type != null && task.getType() != type) {
            continue;
        }
        TaskInfo info = new TaskInfo(task);
        String tid = info.getId();
        long startTime = info.getStartTime();
        long finishTime = info.getFinishTime();
        long elapsed = info.getElapsedTime();
        // -1 sentinels survive when the task has no successful attempt yet.
        long attemptStartTime = -1;
        long shuffleFinishTime = -1;
        long sortFinishTime = -1;
        long attemptFinishTime = -1;
        long elapsedShuffleTime = -1;
        long elapsedSortTime = -1;
        long elapsedReduceTime = -1;
        long attemptElapsed = -1;
        TaskAttempt successful = info.getSuccessful();
        if (successful != null) {
            TaskAttemptInfo ta;
            if (type == TaskType.REDUCE) {
                ReduceTaskAttemptInfo rta = new ReduceTaskAttemptInfo(successful);
                shuffleFinishTime = rta.getShuffleFinishTime();
                sortFinishTime = rta.getMergeFinishTime();
                elapsedShuffleTime = rta.getElapsedShuffleTime();
                elapsedSortTime = rta.getElapsedMergeTime();
                elapsedReduceTime = rta.getElapsedReduceTime();
                ta = rta;
            } else {
                ta = new MapTaskAttemptInfo(successful, false);
            }
            attemptStartTime = ta.getStartTime();
            attemptFinishTime = ta.getFinishTime();
            attemptElapsed = ta.getElapsedTime();
        }
        tasksTableData.append("[\"").append("<a href='" + url("task", tid)).append("'>").append(tid).append("</a>\",\"").append(info.getState()).append("\",\"").append(startTime).append("\",\"").append(finishTime).append("\",\"").append(elapsed).append("\",\"").append(attemptStartTime).append("\",\"");
        if (type == TaskType.REDUCE) {
            tasksTableData.append(shuffleFinishTime).append("\",\"").append(sortFinishTime).append("\",\"");
        }
        tasksTableData.append(attemptFinishTime).append("\",\"");
        if (type == TaskType.REDUCE) {
            tasksTableData.append(elapsedShuffleTime).append("\",\"").append(elapsedSortTime).append("\",\"").append(elapsedReduceTime).append("\",\"");
        }
        tasksTableData.append(attemptElapsed).append("\"],\n");
    }
    // Strip the trailing row comma: rows end "],\n", so length-2 is the
    // comma; with zero rows the buffer is "[\n" and charAt(0) is '[',
    // leaving the data untouched.
    if (tasksTableData.charAt(tasksTableData.length() - 2) == ',') {
        tasksTableData.delete(tasksTableData.length() - 2, tasksTableData.length() - 1);
    }
    tasksTableData.append("]");
    html.script().$type("text/javascript").__("var tasksTableData=" + tasksTableData).__();
    // Footer row of search inputs, one per column, mirroring the header.
    TR<TFOOT<TABLE<Hamlet>>> footRow = tbody.__().tfoot().tr();
    footRow.th().input("search_init").$type(InputType.text).$name("task").$value("ID").__().__().th().input("search_init").$type(InputType.text).$name("state").$value("State").__().__().th().input("search_init").$type(InputType.text).$name("start_time").$value("Start Time").__().__().th().input("search_init").$type(InputType.text).$name("finish_time").$value("Finish Time").__().__().th().input("search_init").$type(InputType.text).$name("elapsed_time").$value("Elapsed Time").__().__().th().input("search_init").$type(InputType.text).$name("attempt_start_time").$value("Start Time").__().__();
    if (type == TaskType.REDUCE) {
        footRow.th().input("search_init").$type(InputType.text).$name("shuffle_time").$value("Shuffle Time").__().__();
        footRow.th().input("search_init").$type(InputType.text).$name("merge_time").$value("Merge Time").__().__();
    }
    footRow.th().input("search_init").$type(InputType.text).$name("attempt_finish").$value("Finish Time").__().__();
    if (type == TaskType.REDUCE) {
        footRow.th().input("search_init").$type(InputType.text).$name("elapsed_shuffle_time").$value("Elapsed Shuffle Time").__().__();
        footRow.th().input("search_init").$type(InputType.text).$name("elapsed_merge_time").$value("Elapsed Merge Time").__().__();
        footRow.th().input("search_init").$type(InputType.text).$name("elapsed_reduce_time").$value("Elapsed Reduce Time").__().__();
    }
    footRow.th().input("search_init").$type(InputType.text).$name("attempt_elapsed").$value("Elapsed Time").__().__();
    footRow.__().__().__();
}
254002.681181hadoop
/**
 * Builds the full usage/help text of the {@code yarn application} CLI,
 * one {@code println} call per rendered output line (presumably compared
 * against the real CLI's help output by the caller — confirm with callers).
 *
 * @return the complete help message, decoded as UTF-8
 * @throws IOException if the buffered bytes cannot be decoded as UTF-8
 */
private String createApplicationCLIHelpMessage() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Encode through an explicit UTF-8 writer so the bytes written here
    // always agree with the UTF-8 decode in baos.toString("UTF-8") below.
    // The original PrintWriter(OutputStream) constructor used the platform
    // default charset, which could disagree with the UTF-8 decode if any
    // non-ASCII text were ever added to this message.
    PrintWriter pw = new PrintWriter(
        new java.io.OutputStreamWriter(baos, java.nio.charset.StandardCharsets.UTF_8));
    pw.println("usage: application");
    pw.println(" -appId <Application ID>                  Specify Application Id to be");
    pw.println("                                          operated");
    pw.println(" -appStates <States>                      Works with -list to filter");
    pw.println("                                          applications based on input");
    pw.println("                                          comma-separated list of");
    pw.println("                                          application states. The valid");
    pw.println("                                          application state can be one of");
    pw.println("                                          the following:");
    pw.println("                                          ALL,NEW,NEW_SAVING,SUBMITTED,ACC");
    pw.println("                                          EPTED,RUNNING,FINISHED,FAILED,KI");
    pw.println("                                          LLED");
    pw.println(" -appTags <Tags>                          Works with -list to filter");
    pw.println("                                          applications based on input");
    pw.println("                                          comma-separated list of");
    pw.println("                                          application tags.");
    pw.println(" -appTypes <Types>                        Works with -list to filter");
    pw.println("                                          applications based on input");
    pw.println("                                          comma-separated list of");
    pw.println("                                          application types.");
    pw.println(" -autoFinalize                            Works with -upgrade and");
    pw.println("                                          -initiate options to initiate");
    pw.println("                                          the upgrade of the application");
    pw.println("                                          with the ability to finalize the");
    pw.println("                                          upgrade automatically.");
    pw.println(" -cancel                                  Works with -upgrade option to");
    pw.println("                                          cancel current upgrade.");
    pw.println(" -changeQueue <Queue Name>                Moves application to a new");
    pw.println("                                          queue. ApplicationId can be");
    pw.println("                                          passed using 'appId' option.");
    pw.println("                                          'movetoqueue' command is");
    pw.println("                                          deprecated, this new command");
    pw.println("                                          'changeQueue' performs same");
    pw.println("                                          functionality.");
    pw.println(" -clusterId <Cluster ID>                  ClusterId. By default, it will");
    pw.println("                                          take default cluster id from the");
    pw.println("                                          RM");
    pw.println(" -component <Component Name> <Count>      Works with -flex option to");
    pw.println("                                          change the number of");
    pw.println("                                          components/containers running");
    pw.println("                                          for an application /");
    pw.println("                                          long-running service. Supports");
    pw.println("                                          absolute or relative changes,");
    pw.println("                                          such as +1, 2, or -3.");
    pw.println(" -components <Components>                 Works with -upgrade option to");
    pw.println("                                          trigger the upgrade of specified");
    pw.println("                                          components of the application.");
    pw.println("                                          Multiple components should be");
    pw.println("                                          separated by commas.");
    pw.println(" -decommission <Application Name>         Decommissions component");
    pw.println("                                          instances for an application /");
    pw.println("                                          long-running service. Requires");
    pw.println("                                          -instances option. Supports");
    pw.println("                                          -appTypes option to specify");
    pw.println("                                          which client implementation to");
    pw.println("                                          use.");
    pw.println(" -destroy <Application Name>              Destroys a saved application");
    pw.println("                                          specification and removes all");
    pw.println("                                          application data permanently.");
    pw.println("                                          Supports -appTypes option to");
    pw.println("                                          specify which client");
    pw.println("                                          implementation to use.");
    pw.println(" -enableFastLaunch <Destination Folder>   Uploads AM dependencies to HDFS");
    pw.println("                                          to make future launches faster.");
    pw.println("                                          Supports -appTypes option to");
    pw.println("                                          specify which client");
    pw.println("                                          implementation to use.");
    pw.println("                                          Optionally a destination folder");
    pw.println("                                          for the tarball can be");
    pw.println("                                          specified.");
    pw.println(" -express <arg>                           Works with -upgrade option to");
    pw.println("                                          perform express upgrade.  It");
    pw.println("                                          requires the upgraded");
    pw.println("                                          application specification file.");
    pw.println(" -finalize                                Works with -upgrade option to");
    pw.println("                                          finalize the upgrade.");
    pw.println(" -flex <Application Name or ID>           Changes number of running");
    pw.println("                                          containers for a component of an");
    pw.println("                                          application / long-running");
    pw.println("                                          service. Requires -component");
    pw.println("                                          option. If name is provided,");
    pw.println("                                          appType must be provided unless");
    pw.println("                                          it is the default yarn-service.");
    pw.println("                                          If ID is provided, the appType");
    pw.println("                                          will be looked up. Supports");
    pw.println("                                          -appTypes option to specify");
    pw.println("                                          which client implementation to");
    pw.println("                                          use.");
    pw.println(" -help                                    Displays help for all commands.");
    pw.println(" -initiate <File Name>                    Works with -upgrade option to");
    pw.println("                                          initiate the application");
    pw.println("                                          upgrade. It requires the");
    pw.println("                                          upgraded application");
    pw.println("                                          specification file.");
    pw.println(" -instances <Component Instances>         Works with -upgrade option to");
    pw.println("                                          trigger the upgrade of specified");
    pw.println("                                          component instances of the");
    pw.println("                                          application. Also works with");
    pw.println("                                          -decommission option to");
    pw.println("                                          decommission specified component");
    pw.println("                                          instances. Multiple instances");
    pw.println("                                          should be separated by commas.");
    pw.println(" -kill <Application ID>                   Kills the application. Set of");
    pw.println("                                          applications can be provided");
    pw.println("                                          separated with space");
    pw.println(" -launch <Application Name> <File Name>   Launches application from");
    pw.println("                                          specification file (saves");
    pw.println("                                          specification and starts");
    pw.println("                                          application). Options");
    pw.println("                                          -updateLifetime and -changeQueue");
    pw.println("                                          can be specified to alter the");
    pw.println("                                          values provided in the file.");
    pw.println("                                          Supports -appTypes option to");
    pw.println("                                          specify which client");
    pw.println("                                          implementation to use.");
    pw.println(" -list                                    List applications. Supports");
    pw.println("                                          optional use of -appTypes to");
    pw.println("                                          filter applications based on");
    pw.println("                                          application type, -appStates to");
    pw.println("                                          filter applications based on");
    pw.println("                                          application state and -appTags");
    pw.println("                                          to filter applications based on");
    pw.println("                                          application tag.");
    pw.println(" -movetoqueue <Application ID>            Moves the application to a");
    pw.println("                                          different queue. Deprecated");
    pw.println("                                          command. Use 'changeQueue'");
    pw.println("                                          instead.");
    pw.println(" -queue <Queue Name>                      Works with the movetoqueue");
    pw.println("                                          command to specify which queue");
    pw.println("                                          to move an application to.");
    pw.println(" -save <Application Name> <File Name>     Saves specification file for an");
    pw.println("                                          application. Options");
    pw.println("                                          -updateLifetime and -changeQueue");
    pw.println("                                          can be specified to alter the");
    pw.println("                                          values provided in the file.");
    pw.println("                                          Supports -appTypes option to");
    pw.println("                                          specify which client");
    pw.println("                                          implementation to use.");
    pw.println(" -start <Application Name>                Starts a previously saved");
    pw.println("                                          application. Supports -appTypes");
    pw.println("                                          option to specify which client");
    pw.println("                                          implementation to use.");
    pw.println(" -status <Application Name or ID>         Prints the status of the");
    pw.println("                                          application. If app ID is");
    pw.println("                                          provided, it prints the generic");
    pw.println("                                          YARN application status. If name");
    pw.println("                                          is provided, it prints the");
    pw.println("                                          application specific status");
    pw.println("                                          based on app's own");
    pw.println("                                          implementation, and -appTypes");
    pw.println("                                          option must be specified unless");
    pw.println("                                          it is the default yarn-service");
    pw.println("                                          type.");
    pw.println(" -stop <Application Name or ID>           Stops application gracefully");
    pw.println("                                          (may be started again later). If");
    pw.println("                                          name is provided, appType must");
    pw.println("                                          be provided unless it is the");
    pw.println("                                          default yarn-service. If ID is");
    pw.println("                                          provided, the appType will be");
    pw.println("                                          looked up. Supports -appTypes");
    pw.println("                                          option to specify which client");
    pw.println("                                          implementation to use.");
    pw.println(" -updateLifetime <Timeout>                update timeout of an application");
    pw.println("                                          from NOW. ApplicationId can be");
    pw.println("                                          passed using 'appId' option.");
    pw.println("                                          Timeout value is in seconds.");
    pw.println(" -updatePriority <Priority>               update priority of an");
    pw.println("                                          application. ApplicationId can");
    pw.println("                                          be passed using 'appId' option.");
    pw.println(" -upgrade <Application Name>              Upgrades an");
    pw.println("                                          application/long-running");
    pw.println("                                          service. It requires either");
    pw.println("                                          -initiate, -instances, or");
    pw.println("                                          -finalize options.");
    // Flush everything into the byte buffer before decoding.
    pw.close();
    String appsHelpStr = baos.toString("UTF-8");
    return appsHelpStr;
}
251485.2345130hadoop
/**
 * Returns the applications visible to the caller, filtered by the supplied
 * query parameters.
 *
 * The limit and start-time window are pushed into the
 * {@link GetApplicationsRequest} and applied server-side; all remaining
 * filters (state, final status, user, queue, type, finish-time window,
 * name) are applied client-side in the loop below.
 *
 * @param req          the HTTP request; used only to resolve the caller UGI
 * @param res          the HTTP response (unused here)
 * @param stateQuery   single application state; merged into statesQuery
 * @param statesQuery  comma-separated/multi-valued set of states (note:
 *                     mutated in place when stateQuery is present)
 * @param finalStatusQuery  required final application status, if any
 * @param userQuery    required owning user, if any
 * @param queueQuery   required queue, if any
 * @param count        maximum number of applications to return (&gt; 0)
 * @param startedBegin/startedEnd  inclusive start-time window, millis
 * @param finishBegin/finishEnd    inclusive finish-time window, millis
 * @param nameQuery    required application name, if any
 * @param applicationTypes  set of acceptable application types
 * @return the matching applications (possibly empty, never null)
 * @throws BadRequestException if a numeric parameter is malformed or out of
 *         range, or if statesQuery contains an unknown state name
 */
public AppsInfo getApps(HttpServletRequest req, HttpServletResponse res, String stateQuery, Set<String> statesQuery, String finalStatusQuery, String userQuery, String queueQuery, String count, String startedBegin, String startedEnd, String finishBegin, String finishEnd, String nameQuery, Set<String> applicationTypes) {
    UserGroupInformation callerUGI = getUser(req);
    boolean checkEnd = false;
    boolean checkAppTypes = false;
    boolean checkAppStates = false;
    long countNum = Long.MAX_VALUE;
    // Time windows default to "unbounded".
    long sBegin = 0;
    long sEnd = Long.MAX_VALUE;
    long fBegin = 0;
    long fEnd = Long.MAX_VALUE;
    if (count != null && !count.isEmpty()) {
        countNum = parseLongQueryParam(count, "limit");
        if (countNum <= 0) {
            throw new BadRequestException("limit value must be greater than 0");
        }
    }
    if (startedBegin != null && !startedBegin.isEmpty()) {
        sBegin = parseLongQueryParam(startedBegin, "startedTimeBegin");
        if (sBegin < 0) {
            throw new BadRequestException("startedTimeBegin must be greater than 0");
        }
    }
    if (startedEnd != null && !startedEnd.isEmpty()) {
        sEnd = parseLongQueryParam(startedEnd, "startedTimeEnd");
        if (sEnd < 0) {
            throw new BadRequestException("startedTimeEnd must be greater than 0");
        }
    }
    if (sBegin > sEnd) {
        throw new BadRequestException("startedTimeEnd must be greater than startTimeBegin");
    }
    if (finishBegin != null && !finishBegin.isEmpty()) {
        checkEnd = true;
        fBegin = parseLongQueryParam(finishBegin, "finishTimeBegin");
        if (fBegin < 0) {
            throw new BadRequestException("finishTimeBegin must be greater than 0");
        }
    }
    if (finishEnd != null && !finishEnd.isEmpty()) {
        checkEnd = true;
        fEnd = parseLongQueryParam(finishEnd, "finishTimeEnd");
        if (fEnd < 0) {
            throw new BadRequestException("finishTimeEnd must be greater than 0");
        }
    }
    if (fBegin > fEnd) {
        throw new BadRequestException("finishTimeEnd must be greater than finishTimeBegin");
    }
    Set<String> appTypes = parseQueries(applicationTypes, false);
    if (!appTypes.isEmpty()) {
        checkAppTypes = true;
    }
    // Fold the single-valued state query into the multi-valued set before
    // validation; parseQueries(_, true) rejects unknown state names.
    if (stateQuery != null && !stateQuery.isEmpty()) {
        statesQuery.add(stateQuery);
    }
    Set<String> appStates = parseQueries(statesQuery, true);
    if (!appStates.isEmpty()) {
        checkAppStates = true;
    }
    AppsInfo allApps = new AppsInfo();
    Collection<ApplicationReport> appReports = null;
    // Only limit and start-time range are filtered server-side.
    final GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    request.setLimit(countNum);
    request.setStartRange(Range.between(sBegin, sEnd));
    try {
        if (callerUGI == null) {
            appReports = getApplicationsReport(request);
        } else {
            // Fetch on behalf of the authenticated caller so ACLs apply.
            appReports = callerUGI.doAs(new PrivilegedExceptionAction<Collection<ApplicationReport>>() {

                @Override
                public Collection<ApplicationReport> run() throws Exception {
                    return getApplicationsReport(request);
                }
            });
        }
    } catch (Exception e) {
        rewrapAndThrowException(e);
    }
    if (appReports == null) {
        return allApps;
    }
    // Client-side filtering: drop every report that fails any active filter.
    for (ApplicationReport appReport : appReports) {
        if (checkAppStates && !appStates.contains(StringUtils.toLowerCase(appReport.getYarnApplicationState().toString()))) {
            continue;
        }
        if (finalStatusQuery != null && !finalStatusQuery.isEmpty()) {
            // valueOf() is used purely to validate the queried name; it
            // throws IllegalArgumentException for unknown values.
            FinalApplicationStatus.valueOf(finalStatusQuery);
            if (!appReport.getFinalApplicationStatus().toString().equalsIgnoreCase(finalStatusQuery)) {
                continue;
            }
        }
        if (userQuery != null && !userQuery.isEmpty()) {
            if (!appReport.getUser().equals(userQuery)) {
                continue;
            }
        }
        if (queueQuery != null && !queueQuery.isEmpty()) {
            if (appReport.getQueue() == null || !appReport.getQueue().equals(queueQuery)) {
                continue;
            }
        }
        if (checkAppTypes && !appTypes.contains(StringUtils.toLowerCase(appReport.getApplicationType().trim()))) {
            continue;
        }
        if (checkEnd && (appReport.getFinishTime() < fBegin || appReport.getFinishTime() > fEnd)) {
            continue;
        }
        if (nameQuery != null && !nameQuery.equals(appReport.getName())) {
            continue;
        }
        AppInfo app = new AppInfo(appReport);
        allApps.add(app);
    }
    return allApps;
}

/**
 * Parses {@code value} as a long, translating a malformed number into a
 * {@link BadRequestException} (HTTP 400) instead of letting the raw
 * {@link NumberFormatException} escape as an internal server error.
 */
private static long parseLongQueryParam(String value, String name) {
    try {
        return Long.parseLong(value);
    } catch (NumberFormatException e) {
        throw new BadRequestException(name + " is not a valid number: " + value);
    }
}
256053.871152hadoop
/**
 * Exercises container reservation in a LeafQueue across three 8GB nodes
 * (24GB cluster): when a node lacks room for the next ask, the scheduler
 * reserves capacity on it, and the reservation is released once the ask
 * is satisfied elsewhere. Asks: one 2GB AM, two 3GB maps, two 5GB reduces.
 */
public void testReservation() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf);
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Two application attempts from the same user submitted to queue A.
    final String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    a.submitApplicationAttempt(app_1, user_0);
    // Three identical 8GB nodes registered with the scheduler context.
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    String host_2 = "host_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getNode(node_2.getNodeID())).thenReturn(node_2);
    cs.getNodeTracker().addNode(node_0);
    cs.getNodeTracker().addNode(node_1);
    cs.getNodeTracker().addNode(node_2);
    final int numNodes = 3;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    // app_0's asks at three priorities: 1x2GB (AM), 2x3GB (maps), 2x5GB (reduces).
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, priorityReduce, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory)));
    // Round 1: AM container (2GB) allocated on node_0.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(22 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Round 2: first map (3GB) also lands on node_0 -> 5GB used there.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(19 * GB, a.getMetrics().getAvailableMB());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Round 3: second map (3GB) on node_1; both 5GB reduce asks still pending.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(16 * GB, a.getMetrics().getAvailableMB());
    assertEquals(16 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(2, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Round 4: node_0 has only 3GB free, so the 5GB reduce is RESERVED there:
    // reservedMB becomes 5, nothing newly allocated, headroom drops 16->11.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(11 * GB, a.getMetrics().getAvailableMB());
    assertEquals(11 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(2, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Round 5: first reduce (5GB) allocated on empty node_2; the node_0
    // reservation stays in place and one reduce ask remains outstanding.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(18 * GB, a.getUsedResources().getMemorySize());
    assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(6 * GB, a.getMetrics().getAvailableMB());
    assertEquals(6 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(1, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Round 6: node_1 (5GB free) satisfies the last reduce; the node_0
    // reservation is released (reservedMB back to 0, reservedContainer null)
    // and no reduce asks remain.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(18 * GB, a.getUsedResources().getMemorySize());
    assertEquals(18 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(18 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(6 * GB, a.getMetrics().getAvailableMB());
    assertEquals(6 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(0, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
}
256053.871152hadoop
/**
 * Verifies CapacityScheduler reservation behavior when
 * RESERVE_CONT_LOOK_ALL_NODES is set to false: once a container is
 * reserved on a node, a heartbeat from a different node does NOT
 * satisfy the remaining ask (the final assignContainers call on node_1
 * changes nothing — compare with the continue-look-enabled variant of
 * this test where the same heartbeat completes the allocation).
 */
public void testReservationNoContinueLook() throws Exception {
    // Rebuild the queue hierarchy with continue-look-all-nodes disabled.
    queues = new CSQueueStore();
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setBoolean(CapacitySchedulerConfiguration.RESERVE_CONT_LOOK_ALL_NODES, false);
    setup(csConf);
    // Stub leaf queue 'a' and submit two attempts for the same user.
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    final String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    a.submitApplicationAttempt(app_1, user_0);
    // Three mock nodes of 8GB each -> 24GB cluster.
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    String host_2 = "host_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getNode(node_2.getNodeID())).thenReturn(node_2);
    final int numNodes = 3;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    // Asks: one 2GB AM container, two 3GB map containers, two 5GB reduce containers.
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, priorityReduce, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory)));
    // Heartbeat node_0: the 2GB AM container is allocated there.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(22 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Heartbeat node_0 again: first 3GB map container (node_0 now 5GB used).
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(19 * GB, a.getMetrics().getAvailableMB());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Heartbeat node_1: second 3GB map container lands there; no reservation yet.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(16 * GB, a.getMetrics().getAvailableMB());
    assertEquals(16 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(2, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Heartbeat node_0: only 3GB free there, so a 5GB reduce container is
    // RESERVED on node_0 (used grows to 13GB but consumption stays 8GB).
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(11 * GB, a.getMetrics().getAvailableMB());
    assertEquals(11 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(2, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Heartbeat node_2: one 5GB reduce container is allocated there;
    // the reservation on node_0 remains for the last reduce ask.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(18 * GB, a.getUsedResources().getMemorySize());
    assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(6 * GB, a.getMetrics().getAvailableMB());
    assertEquals(6 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(1, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Heartbeat node_1: with continue-look disabled, NOTHING changes —
    // the outstanding reduce stays reserved on node_0 and is not
    // allocated on node_1, even though node_1 has room. This is the
    // behavior under test.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(18 * GB, a.getUsedResources().getMemorySize());
    assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(6 * GB, a.getMetrics().getAvailableMB());
    assertEquals(6 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize());
    assertEquals(1, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
}
254569.413162hadoop
/**
 * Exercises NMToken / ContainerToken validation paths on a live
 * mini-cluster: valid tokens, tokens signed with the wrong master key,
 * tokens bound to the wrong node, a newer-versioned token identifier,
 * container relaunch rejection, stop/get after master-key rolls, and
 * starting a container for a new attempt with an old attempt's NMToken.
 *
 * @param testConf cluster configuration used to create the RPC layer and
 *                 the temporary NMToken secret manager
 * @throws Exception on any test failure in the RPC round-trips
 */
private void testNMTokens(Configuration testConf) throws Exception {
    NMTokenSecretManagerInRM nmTokenSecretManagerRM = yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
    NMTokenSecretManagerInNM nmTokenSecretManagerNM = yarnCluster.getNodeManager(0).getNMContext().getNMTokenSecretManager();
    RMContainerTokenSecretManager containerTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
    NodeManager nm = yarnCluster.getNodeManager(0);
    // NM must have received the RM's current NMToken master key before tokens can validate.
    waitForNMToReceiveNMTokenKey(nmTokenSecretManagerNM);
    assertEquals(nmTokenSecretManagerNM.getCurrentKey().getKeyId(), nmTokenSecretManagerRM.getCurrentKey().getKeyId());
    YarnRPC rpc = YarnRPC.create(testConf);
    String user = "test";
    Resource r = Resource.newInstance(1024, 1);
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    MockRMApp m = new MockRMApp(appId.getId(), appId.getClusterTimestamp(), RMAppState.NEW);
    yarnCluster.getResourceManager().getRMContext().getRMApps().put(appId, m);
    ApplicationAttemptId validAppAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId validContainerId = ContainerId.newContainerId(validAppAttemptId, 0);
    NodeId validNode = yarnCluster.getNodeManager(0).getNMContext().getNodeId();
    NodeId invalidNode = NodeId.newInstance("InvalidHost", 1234);
    org.apache.hadoop.yarn.api.records.Token validNMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
    org.apache.hadoop.yarn.api.records.Token validContainerToken = containerTokenSecretManager.createContainerToken(validContainerId, 0, validNode, user, r, Priority.newInstance(10), 1234);
    // Sanity-check that priority and creation time survive the token round-trip.
    ContainerTokenIdentifier identifier = BuilderUtils.newContainerTokenIdentifier(validContainerToken);
    assertEquals(Priority.newInstance(10), identifier.getPriority());
    assertEquals(1234, identifier.getCreationTime());
    StringBuilder sb;
    // Build a throwaway secret manager and roll it until its key id differs
    // from the RM's, so any NMToken it signs is "illegally generated".
    NMTokenSecretManagerInRM tempManager = new NMTokenSecretManagerInRM(testConf);
    tempManager.rollMasterKey();
    do {
        tempManager.rollMasterKey();
        tempManager.activateNextMasterKey();
    } while (tempManager.getCurrentKey().getKeyId() == nmTokenSecretManagerRM.getCurrentKey().getKeyId());
    // Starting a container with NO NMToken must fail authentication; the
    // expected message depends on whether security is enabled.
    if (UserGroupInformation.isSecurityEnabled()) {
        sb = new StringBuilder("Client cannot authenticate via:[TOKEN]");
    } else {
        sb = new StringBuilder("SIMPLE authentication is not enabled.  Available:[TOKEN]");
    }
    String errorMsg = testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, null, true);
    assertTrue(errorMsg.contains(sb.toString()), "In calling " + validNode + " exception was '" + errorMsg + "' but doesn't contain '" + sb.toString() + "'");
    // NMToken signed by the wrong master key -> rejected as illegally generated.
    org.apache.hadoop.yarn.api.records.Token invalidNMToken = tempManager.createNMToken(validAppAttemptId, validNode, user);
    sb = new StringBuilder("Given NMToken for application : ");
    sb.append(validAppAttemptId.toString()).append(" seems to have been generated illegally.");
    // NOTE(review): the containment check here (and two lines below) looks
    // reversed relative to the later `testStartContainer(...).contains(sb)`
    // checks — expected-message.contains(actual-error) only passes if the
    // actual error is a substring of the expected text. Confirm intent.
    assertTrue(sb.toString().contains(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, invalidNMToken, true)));
    // NMToken bound to a different node -> rejected as not valid for this NM.
    invalidNMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, invalidNode, user);
    sb = new StringBuilder("Given NMToken for application : ");
    sb.append(validAppAttemptId).append(" is not valid for current node manager.expected : ").append(validNode.toString()).append(" found : ").append(invalidNode.toString());
    assertTrue(sb.toString().contains(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, invalidNMToken, true)));
    // Lengthen the allocation-expiry interval so the freshly minted token
    // (creation time 0) is accepted, then start the container successfully.
    testConf.setInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, 4 * 60 * 1000);
    validContainerToken = containerTokenSecretManager.createContainerToken(validContainerId, 0, validNode, user, r, Priority.newInstance(0), 0);
    assertTrue(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, validNMToken, false).isEmpty());
    assertTrue(nmTokenSecretManagerNM.isAppAttemptNMTokenKeyPresent(validAppAttemptId));
    // A token identifier of a NEWER version (extra "message" field) must
    // still be accepted by the NM (forward compatibility).
    ApplicationAttemptId validAppAttemptId2 = ApplicationAttemptId.newInstance(appId, 2);
    ContainerId validContainerId2 = ContainerId.newContainerId(validAppAttemptId2, 0);
    org.apache.hadoop.yarn.api.records.Token validContainerToken2 = containerTokenSecretManager.createContainerToken(validContainerId2, 0, validNode, user, r, Priority.newInstance(0), 0);
    org.apache.hadoop.yarn.api.records.Token validNMToken2 = nmTokenSecretManagerRM.createNMToken(validAppAttemptId2, validNode, user);
    NMTokenIdentifier newIdentifier = new NMTokenIdentifier();
    byte[] tokenIdentifierContent = validNMToken2.getIdentifier().array();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
    newIdentifier.readFields(dib);
    NMTokenIdentifierNewForTest newVersionIdentifier = new NMTokenIdentifierNewForTest(newIdentifier, "message");
    assertEquals(validAppAttemptId2.getAttemptId(), newVersionIdentifier.getApplicationAttemptId().getAttemptId(), "The ApplicationAttemptId is changed after set to " + "newVersionIdentifier");
    assertEquals("message", newVersionIdentifier.getMessage(), "The message is changed after set to newVersionIdentifier");
    assertEquals(validNode, newVersionIdentifier.getNodeId(), "The NodeId is changed after set to newVersionIdentifier");
    org.apache.hadoop.yarn.api.records.Token newVersionedNMToken = BaseNMTokenSecretManager.newInstance(nmTokenSecretManagerRM.retrievePassword(newVersionIdentifier), newVersionIdentifier);
    assertTrue(testStartContainer(rpc, validAppAttemptId2, validNode, validContainerToken2, newVersionedNMToken, false).isEmpty());
    assertTrue(nmTokenSecretManagerNM.isAppAttemptNMTokenKeyPresent(validAppAttemptId2));
    // Relaunching an already-finished container with the same id must be rejected.
    waitForContainerToFinishOnNM(validContainerId);
    sb = new StringBuilder("Attempt to relaunch the same container with id ");
    sb.append(validContainerId);
    assertTrue(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, validNMToken, true).contains(sb.toString()));
    // Stop still works with the old NMToken, even after the master key rolls twice.
    testStopContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, false);
    rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
    rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
    // Recently stopped container is still known to the NM...
    sb = new StringBuilder("Container ");
    sb.append(validContainerId).append(" was recently stopped on node manager");
    assertTrue(testGetContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, true).contains(sb.toString()));
    // ...until the finished-containers cache is cleared, after which it is unknown.
    nm.getNodeStatusUpdater().clearFinishedContainersFromCache();
    sb = new StringBuilder("Container ").append(validContainerId.toString()).append(" is not handled by this NodeManager");
    assertTrue(testGetContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, false).contains(sb.toString()));
    // An NMToken issued for attempt 1 can still start a container for attempt 2.
    // NOTE(review): attempt2 duplicates validAppAttemptId2 above — presumably
    // intentional re-creation for clarity; confirm.
    ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);
    Token attempt1NMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
    org.apache.hadoop.yarn.api.records.Token newContainerToken = containerTokenSecretManager.createContainerToken(ContainerId.newContainerId(attempt2, 1), 0, validNode, user, r, Priority.newInstance(0), 0);
    assertTrue(testStartContainer(rpc, attempt2, validNode, newContainerToken, attempt1NMToken, false).isEmpty());
}
254147.8913145hadoop
/**
 * Verifies reading YARN application entities filtered by "is related to"
 * relations: OR vs AND filter lists, EQUAL vs NOT_EQUAL comparators,
 * multi-value matches, non-matching filters, and nested filter lists.
 * The per-query verification loop (sum isRelatedTo sizes, check entity
 * ids) was duplicated five times and is now factored into
 * {@link #verifyIsRelatedTo(Set, String...)}.
 */
public void testReadAppsIsRelatedTo() throws Exception {
    // OR list: task->relatedto1 OR task2->relatedto4 -> two matching apps.
    TimelineFilterList irt = new TimelineFilterList(Operator.OR);
    irt.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1"))));
    irt.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4"))));
    Set<TimelineEntity> entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(2, entities.size());
    // Field.ALL was requested, so isRelatedTo relations are populated (3 total).
    assertEquals(3, verifyIsRelatedTo(entities, "application_1111111111_2222", "application_1111111111_3333"));
    // AND list (default operator): EQUAL relatedto3 AND NOT_EQUAL relatedto5.
    TimelineFilterList irt1 = new TimelineFilterList();
    irt1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3"))));
    irt1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto5"))));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt1).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    // Default TimelineDataToRetrieve: relations are not fetched, so count is 0.
    assertEquals(0, verifyIsRelatedTo(entities, "application_1111111111_4444"));
    // Same OR query as irt, but without Field.ALL -> same ids, no relation data.
    TimelineFilterList irt2 = new TimelineFilterList(Operator.OR);
    irt2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1"))));
    irt2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4"))));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt2).build(), new TimelineDataToRetrieve());
    assertEquals(2, entities.size());
    assertEquals(0, verifyIsRelatedTo(entities, "application_1111111111_2222", "application_1111111111_3333"));
    // Single filter requiring BOTH values relatedto3 and relatedto5.
    TimelineFilterList irt3 = new TimelineFilterList();
    irt3.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3", "relatedto5"))));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt3).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    assertEquals(0, verifyIsRelatedTo(entities, "application_1111111111_3333"));
    // AND with a key ("dummy_task") no entity has -> no matches.
    TimelineFilterList irt4 = new TimelineFilterList();
    irt4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3"))));
    irt4.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_task", new HashSet<Object>(Arrays.asList("relatedto5"))));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt4).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // Requires a value ("relatedto7") no entity has -> no matches.
    TimelineFilterList irt5 = new TimelineFilterList();
    irt5.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3", "relatedto7"))));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt5).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // Nested lists: (list1 AND-fails) OR (list2 matches task2->relatedto4).
    TimelineFilterList list1 = new TimelineFilterList();
    list1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1"))));
    list1.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_task", new HashSet<Object>(Arrays.asList("relatedto4"))));
    TimelineFilterList list2 = new TimelineFilterList();
    list2.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4"))));
    TimelineFilterList irt6 = new TimelineFilterList(Operator.OR, list1, list2);
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().isRelatedTo(irt6).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    assertEquals(0, verifyIsRelatedTo(entities, "application_1111111111_3333"));
}

/**
 * Checks that every returned entity's id is one of {@code expectedIds}
 * (failing the test otherwise) and returns the total number of
 * isRelatedTo relations across all entities.
 *
 * @param entities    entities returned by the reader
 * @param expectedIds the only entity ids allowed in the result set
 * @return sum of {@code getIsRelatedToEntities().size()} over all entities
 */
private int verifyIsRelatedTo(Set<TimelineEntity> entities, String... expectedIds) {
    Set<String> allowedIds = new HashSet<>(Arrays.asList(expectedIds));
    int isRelatedToCnt = 0;
    for (TimelineEntity timelineEntity : entities) {
        isRelatedToCnt += timelineEntity.getIsRelatedToEntities().size();
        if (!allowedIds.contains(timelineEntity.getId())) {
            Assert.fail("Entity id should have been one of " + allowedIds);
        }
    }
    return isRelatedToCnt;
}
252813.8826146kafka
/**
 * Performs the JAAS login on the supplied context.
 * NOTE(review): presumably a protected seam so subclasses/tests can
 * override the login step — confirm against callers.
 *
 * @param loginContext configured JAAS login context to authenticate
 * @throws LoginException if the JAAS login fails
 */
protected void login(LoginContext loginContext) throws LoginException {
    loginContext.login();
}
252813.8826146kafka
/**
 * Performs the JAAS login on the supplied context.
 * NOTE(review): presumably a protected seam so subclasses/tests can
 * override the login step — confirm against callers.
 *
 * @param loginContext configured JAAS login context to authenticate
 * @throws LoginException if the JAAS login fails
 */
protected void login(LoginContext loginContext) throws LoginException {
    loginContext.login();
}
251812.127781kafka
/**
 * Builds a sample request for the given API key at the given protocol
 * version. Pure dispatch table: each {@code ApiKeys} constant maps 1:1
 * to its {@code create*Request(version)} factory defined elsewhere in
 * this class. Keep this switch exhaustive — a new ApiKeys constant that
 * is not handled here falls through to the default and fails.
 *
 * @param apikey  the Kafka API to build a sample request for
 * @param version the protocol version to build the request at
 * @return a populated sample request for the (apikey, version) pair
 * @throws IllegalArgumentException if the API key has no case here
 */
private AbstractRequest getRequest(ApiKeys apikey, short version) {
    switch(apikey) {
        case PRODUCE:
            return createProduceRequest(version);
        case FETCH:
            return createFetchRequest(version);
        case LIST_OFFSETS:
            return createListOffsetRequest(version);
        case METADATA:
            return createMetadataRequest(version, singletonList("topic1"));
        case LEADER_AND_ISR:
            return createLeaderAndIsrRequest(version);
        case STOP_REPLICA:
            return createStopReplicaRequest(version, true);
        case UPDATE_METADATA:
            return createUpdateMetadataRequest(version, "rack1");
        case CONTROLLED_SHUTDOWN:
            return createControlledShutdownRequest(version);
        case OFFSET_COMMIT:
            return createOffsetCommitRequest(version);
        case OFFSET_FETCH:
            return createOffsetFetchRequest(version, true);
        case FIND_COORDINATOR:
            return createFindCoordinatorRequest(version);
        case JOIN_GROUP:
            return createJoinGroupRequest(version);
        case HEARTBEAT:
            return createHeartBeatRequest(version);
        case LEAVE_GROUP:
            return createLeaveGroupRequest(version);
        case SYNC_GROUP:
            return createSyncGroupRequest(version);
        case DESCRIBE_GROUPS:
            return createDescribeGroupRequest(version);
        case LIST_GROUPS:
            return createListGroupsRequest(version);
        case SASL_HANDSHAKE:
            return createSaslHandshakeRequest(version);
        case API_VERSIONS:
            return createApiVersionRequest(version);
        case CREATE_TOPICS:
            return createCreateTopicRequest(version);
        case DELETE_TOPICS:
            return createDeleteTopicsRequest(version);
        case DELETE_RECORDS:
            return createDeleteRecordsRequest(version);
        case INIT_PRODUCER_ID:
            return createInitPidRequest(version);
        case OFFSET_FOR_LEADER_EPOCH:
            return createLeaderEpochRequestForReplica(version, 1);
        case ADD_PARTITIONS_TO_TXN:
            return createAddPartitionsToTxnRequest(version);
        case ADD_OFFSETS_TO_TXN:
            return createAddOffsetsToTxnRequest(version);
        case END_TXN:
            return createEndTxnRequest(version);
        case WRITE_TXN_MARKERS:
            return createWriteTxnMarkersRequest(version);
        case TXN_OFFSET_COMMIT:
            return createTxnOffsetCommitRequest(version);
        case DESCRIBE_ACLS:
            return createDescribeAclsRequest(version);
        case CREATE_ACLS:
            return createCreateAclsRequest(version);
        case DELETE_ACLS:
            return createDeleteAclsRequest(version);
        case DESCRIBE_CONFIGS:
            return createDescribeConfigsRequest(version);
        case ALTER_CONFIGS:
            return createAlterConfigsRequest(version);
        case ALTER_REPLICA_LOG_DIRS:
            return createAlterReplicaLogDirsRequest(version);
        case DESCRIBE_LOG_DIRS:
            return createDescribeLogDirsRequest(version);
        case SASL_AUTHENTICATE:
            return createSaslAuthenticateRequest(version);
        case CREATE_PARTITIONS:
            return createCreatePartitionsRequest(version);
        case CREATE_DELEGATION_TOKEN:
            return createCreateTokenRequest(version);
        case RENEW_DELEGATION_TOKEN:
            return createRenewTokenRequest(version);
        case EXPIRE_DELEGATION_TOKEN:
            return createExpireTokenRequest(version);
        case DESCRIBE_DELEGATION_TOKEN:
            return createDescribeTokenRequest(version);
        case DELETE_GROUPS:
            return createDeleteGroupsRequest(version);
        case ELECT_LEADERS:
            return createElectLeadersRequest(version);
        case INCREMENTAL_ALTER_CONFIGS:
            return createIncrementalAlterConfigsRequest(version);
        case ALTER_PARTITION_REASSIGNMENTS:
            return createAlterPartitionReassignmentsRequest(version);
        case LIST_PARTITION_REASSIGNMENTS:
            return createListPartitionReassignmentsRequest(version);
        case OFFSET_DELETE:
            return createOffsetDeleteRequest(version);
        case DESCRIBE_CLIENT_QUOTAS:
            return createDescribeClientQuotasRequest(version);
        case ALTER_CLIENT_QUOTAS:
            return createAlterClientQuotasRequest(version);
        case DESCRIBE_USER_SCRAM_CREDENTIALS:
            return createDescribeUserScramCredentialsRequest(version);
        case ALTER_USER_SCRAM_CREDENTIALS:
            return createAlterUserScramCredentialsRequest(version);
        case VOTE:
            return createVoteRequest(version);
        case BEGIN_QUORUM_EPOCH:
            return createBeginQuorumEpochRequest(version);
        case END_QUORUM_EPOCH:
            return createEndQuorumEpochRequest(version);
        case DESCRIBE_QUORUM:
            return createDescribeQuorumRequest(version);
        case ALTER_PARTITION:
            return createAlterPartitionRequest(version);
        case UPDATE_FEATURES:
            return createUpdateFeaturesRequest(version);
        case ENVELOPE:
            return createEnvelopeRequest(version);
        case FETCH_SNAPSHOT:
            return createFetchSnapshotRequest(version);
        case DESCRIBE_CLUSTER:
            return createDescribeClusterRequest(version);
        case DESCRIBE_PRODUCERS:
            return createDescribeProducersRequest(version);
        case BROKER_REGISTRATION:
            return createBrokerRegistrationRequest(version);
        case BROKER_HEARTBEAT:
            return createBrokerHeartbeatRequest(version);
        case UNREGISTER_BROKER:
            return createUnregisterBrokerRequest(version);
        case DESCRIBE_TRANSACTIONS:
            return createDescribeTransactionsRequest(version);
        case LIST_TRANSACTIONS:
            return createListTransactionsRequest(version);
        case ALLOCATE_PRODUCER_IDS:
            return createAllocateProducerIdsRequest(version);
        case CONSUMER_GROUP_HEARTBEAT:
            return createConsumerGroupHeartbeatRequest(version);
        case CONSUMER_GROUP_DESCRIBE:
            return createConsumerGroupDescribeRequest(version);
        case CONTROLLER_REGISTRATION:
            return createControllerRegistrationRequest(version);
        case GET_TELEMETRY_SUBSCRIPTIONS:
            return createGetTelemetrySubscriptionsRequest(version);
        case PUSH_TELEMETRY:
            return createPushTelemetryRequest(version);
        case ASSIGN_REPLICAS_TO_DIRS:
            return createAssignReplicasToDirsRequest(version);
        case LIST_CLIENT_METRICS_RESOURCES:
            return createListClientMetricsResourcesRequest(version);
        case DESCRIBE_TOPIC_PARTITIONS:
            return createDescribeTopicPartitionsRequest(version);
        default:
            // Safety net for ApiKeys constants added without a case above.
            throw new IllegalArgumentException("Unknown API key " + apikey);
    }
}
255008.327151kafka
 void testDescribeTopicPartitionsRequestWithEdgeCases() {
    Authorizer authorizer = mock(Authorizer.class);
    String authorizedTopic = "authorized-topic1";
    String authorizedTopic2 = "authorized-topic2";
    Action expectedActions1 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true);
    Action expectedActions2 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic2, PatternType.LITERAL), 1, true, true);
    when(authorizer.authorize(any(RequestContext.class), argThat(t -> t.contains(expectedActions1) || t.contains(expectedActions2)))).thenAnswer(invocation -> {
        List<Action> actions = invocation.getArgument(1);
        return actions.stream().map(action -> {
            if (action.resourcePattern().name().startsWith("authorized"))
                return AuthorizationResult.ALLOWED;
            else
                return AuthorizationResult.DENIED;
        }).collect(Collectors.toList());
    });
    Uuid authorizedTopicId = Uuid.randomUuid();
    Uuid authorizedTopicId2 = Uuid.randomUuid();
    Map<String, Uuid> topicIds = new HashMap<>();
    topicIds.put(authorizedTopic, authorizedTopicId);
    topicIds.put(authorizedTopic2, authorizedTopicId2);
    BrokerEndpointCollection collection = new BrokerEndpointCollection();
    collection.add(new BrokerEndpoint().setName(broker.endpoints().get(0).listener()).setHost(broker.endpoints().get(0).host()).setPort(broker.endpoints().get(0).port()).setSecurityProtocol(broker.endpoints().get(0).securityProtocol()));
    List<ApiMessage> records = Arrays.asList(new RegisterBrokerRecord().setBrokerId(broker.id()).setBrokerEpoch(0).setIncarnationId(Uuid.randomUuid()).setEndPoints(collection).setRack(broker.rack()).setFenced(false), new TopicRecord().setName(authorizedTopic).setTopicId(topicIds.get(authorizedTopic)), new TopicRecord().setName(authorizedTopic2).setTopicId(topicIds.get(authorizedTopic2)), new PartitionRecord().setTopicId(authorizedTopicId).setPartitionId(0).setReplicas(Arrays.asList(0, 1, 2)).setLeader(0).setIsr(Arrays.asList(0)).setEligibleLeaderReplicas(Arrays.asList(1)).setLastKnownElr(Arrays.asList(2)).setLeaderEpoch(0).setPartitionEpoch(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord().setTopicId(authorizedTopicId).setPartitionId(1).setReplicas(Arrays.asList(0, 1, 2)).setLeader(0).setIsr(Arrays.asList(0)).setEligibleLeaderReplicas(Arrays.asList(1)).setLastKnownElr(Arrays.asList(2)).setLeaderEpoch(0).setPartitionEpoch(1).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord().setTopicId(authorizedTopicId2).setPartitionId(0).setReplicas(Arrays.asList(0, 1, 3)).setLeader(0).setIsr(Arrays.asList(0)).setEligibleLeaderReplicas(Arrays.asList(1)).setLastKnownElr(Arrays.asList(3)).setLeaderEpoch(0).setPartitionEpoch(2).setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()));
    KRaftMetadataCache metadataCache = new KRaftMetadataCache(0);
    updateKraftMetadataCache(metadataCache, records);
    DescribeTopicPartitionsRequestHandler handler = new DescribeTopicPartitionsRequestHandler(metadataCache, new AuthHelper(scala.Option.apply(authorizer)), createKafkaDefaultConfig());
    DescribeTopicPartitionsRequest describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2))).setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName(authorizedTopic).setPartitionIndex(1)));
    RequestChannel.Request request;
    try {
        request = buildRequest(describeTopicPartitionsRequest, plaintextListener);
    } catch (Exception e) {
        fail(e.getMessage());
        return;
    }
    DescribeTopicPartitionsResponseData response = handler.handleDescribeTopicPartitionsRequest(request);
    List<DescribeTopicPartitionsResponseTopic> topics = response.topics().valuesList();
    assertEquals(2, topics.size());
    DescribeTopicPartitionsResponseTopic topicToCheck = topics.get(0);
    assertEquals(authorizedTopicId, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic, topicToCheck.name());
    assertEquals(1, topicToCheck.partitions().size());
    topicToCheck = topics.get(1);
    assertEquals(authorizedTopicId2, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic2, topicToCheck.name());
    assertEquals(1, topicToCheck.partitions().size());
    describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2))).setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName(authorizedTopic2).setPartitionIndex(0)));
    try {
        request = buildRequest(describeTopicPartitionsRequest, plaintextListener);
    } catch (Exception e) {
        fail(e.getMessage());
        return;
    }
    response = handler.handleDescribeTopicPartitionsRequest(request);
    topics = response.topics().valuesList();
    assertEquals(1, topics.size());
    topicToCheck = topics.get(0);
    assertEquals(authorizedTopicId2, topicToCheck.topicId());
    assertEquals(Errors.NONE.code(), topicToCheck.errorCode());
    assertEquals(authorizedTopic2, topicToCheck.name());
    assertEquals(1, topicToCheck.partitions().size());
    describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2))).setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName("Non-existing").setPartitionIndex(0)));
    try {
        handler.handleDescribeTopicPartitionsRequest(buildRequest(describeTopicPartitionsRequest, plaintextListener));
    } catch (Exception e) {
        assertInstanceOf(InvalidRequestException.class, e, e.getMessage());
    }
    describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData().setTopics(Arrays.asList(new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2))).setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName(authorizedTopic).setPartitionIndex(-1)));
    try {
        handler.handleDescribeTopicPartitionsRequest(buildRequest(describeTopicPartitionsRequest, plaintextListener));
    } catch (Exception e) {
        assertInstanceOf(InvalidRequestException.class, e, e.getMessage());
    }
}
254886.931162kafka
/**
 * Verifies the downgrade path when the last member using the modern consumer protocol
 * leaves a consumer group whose remaining member also advertises a classic (JoinGroup)
 * protocol: the consumer-group state is tombstoned, a classic group-metadata record is
 * written, state-transition metrics fire, classic timers are scheduled, and the whole
 * conversion rolls back cleanly if the record append fails.
 */
public void testLastConsumerProtocolMemberLeavingConsumerGroup() {
    String groupId = "group-id";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Classic-protocol metadata for member1: subscribes to foo/bar and owns the
    // partitions listed in the serialized Subscription. This makes member1 eligible
    // to survive a downgrade to a classic group.
    List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = Collections.singletonList(new ConsumerGroupMemberMetadataValue.ClassicProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1)))))));
    // member1 carries classic member metadata; member2 does not (consumer protocol only),
    // so member2 leaving makes member1 the last member and triggers the downgrade.
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setRebalanceTimeoutMs(45000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(protocols)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setRebalanceTimeoutMs(45000).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build();
    // DOWNGRADE migration policy is what allows the consumer group to convert to a
    // classic group when the last consumer-protocol member leaves.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE).withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 6).addTopic(barTopicId, barTopicName, 3).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withMember(member1).withMember(member2).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).withAssignmentEpoch(10)).build();
    context.replay(CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
        }
    }));
    context.commit();
    // Snapshot taken before the leave so the rollback assertion below can compare
    // against the pre-downgrade consumer group.
    ConsumerGroup consumerGroup = context.groupMetadataManager.consumerGroup(groupId);
    // member2 leaves via a heartbeat with LEAVE_GROUP_MEMBER_EPOCH.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH).setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setTopicPartitions(Collections.emptyList()));
    // The classic-group assignment for member1 mirrors its consumer-group assignment.
    byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1)))));
    Map<String, byte[]> assignments = new HashMap<String, byte[]>() {

        {
            put(memberId1, assignment);
        }
    };
    ClassicGroup expectedClassicGroup = new ClassicGroup(new LogContext(), groupId, STABLE, context.time, context.metrics, 10, Optional.ofNullable(ConsumerProtocol.PROTOCOL_TYPE), Optional.ofNullable("range"), Optional.ofNullable(memberId1), Optional.of(context.time.milliseconds()));
    expectedClassicGroup.add(new ClassicGroupMember(memberId1, Optional.ofNullable(member1.instanceId()), member1.clientId(), member1.clientHost(), member1.rebalanceTimeoutMs(), member1.classicProtocolSessionTimeout().get(), ConsumerProtocol.PROTOCOL_TYPE, member1.supportedJoinGroupRequestProtocols(), assignment));
    // Expected record sequence: per-member current/target assignment tombstones,
    // target-epoch tombstone, member subscription tombstones, group subscription
    // metadata + epoch tombstones, then the new classic group metadata record.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newCurrentAssignmentTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newCurrentAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newTargetAssignmentTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newTargetAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newTargetAssignmentEpochTombstoneRecord(groupId), CoordinatorRecordHelpers.newMemberSubscriptionTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newMemberSubscriptionTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newGroupSubscriptionMetadataTombstoneRecord(groupId), CoordinatorRecordHelpers.newGroupEpochTombstoneRecord(groupId), CoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()));
    // Per-member tombstone pairs may appear in either order, hence the unordered
    // comparisons for sublists [0,2), [2,4) and [5,7).
    assertUnorderedListEquals(expectedRecords.subList(0, 2), result.records().subList(0, 2));
    assertUnorderedListEquals(expectedRecords.subList(2, 4), result.records().subList(2, 4));
    assertRecordEquals(expectedRecords.get(4), result.records().get(4));
    assertUnorderedListEquals(expectedRecords.subList(5, 7), result.records().subList(5, 7));
    assertRecordsEquals(expectedRecords.subList(7, 10), result.records().subList(7, 10));
    // Metrics must reflect the consumer-group -> classic-group transition.
    verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.STABLE, null);
    verify(context.metrics, times(1)).onClassicGroupStateTransition(null, STABLE);
    // Classic-group session and join timers must be scheduled for the surviving member.
    ScheduledTimeout<Void, CoordinatorRecord> heartbeatTimeout = context.timer.timeout(classicGroupHeartbeatKey(groupId, memberId1));
    assertNotNull(heartbeatTimeout);
    ScheduledTimeout<Void, CoordinatorRecord> groupJoinTimeout = context.timer.timeout(classicGroupJoinKey(groupId));
    assertNotNull(groupJoinTimeout);
    ClassicGroup classicGroup = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
    assertTrue(classicGroup.isInState(PREPARING_REBALANCE));
    // Simulate a failed append of the downgrade records: the coordinator must roll
    // back to the original consumer group and undo the classic-group metric.
    result.appendFuture().completeExceptionally(new NotLeaderOrFollowerException());
    context.rollback();
    assertEquals(consumerGroup, context.groupMetadataManager.consumerGroup(groupId));
    verify(context.metrics, times(1)).onClassicGroupStateTransition(PREPARING_REBALANCE, null);
}
254132.5224134wildfly
/**
 * Serializes the child elements of an Infinispan cache resource to XML, in schema order:
 * locking, transaction, memory (heap or off-heap, mutually exclusive), expiration, the
 * configured store variants (custom, file, JDBC, remote, HotRod), partition handling,
 * state transfer, and cross-site backups. Optional elements are emitted only when the
 * corresponding child resource exists and has at least one defined attribute; element
 * ordering here is part of the schema contract and must not be changed.
 *
 * @param writer the StAX writer positioned inside the cache element
 * @param cache  the cache resource model whose children are serialized
 * @throws XMLStreamException on any underlying write failure
 */
private static void writeCacheElements(XMLExtendedStreamWriter writer, ModelNode cache) throws XMLStreamException {
    if (cache.hasDefined(LockingResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode locking = cache.get(LockingResourceDefinition.PATH.getKeyValuePair());
        Set<LockingResourceDefinition.Attribute> attributes = EnumSet.allOf(LockingResourceDefinition.Attribute.class);
        if (hasDefined(locking, attributes)) {
            writer.writeStartElement(XMLElement.LOCKING.getLocalName());
            writeAttributes(writer, locking, attributes);
            writer.writeEndElement();
        }
    }
    if (cache.hasDefined(TransactionResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode transaction = cache.get(TransactionResourceDefinition.PATH.getKeyValuePair());
        Set<TransactionResourceDefinition.Attribute> attributes = EnumSet.allOf(TransactionResourceDefinition.Attribute.class);
        if (hasDefined(transaction, attributes)) {
            writer.writeStartElement(XMLElement.TRANSACTION.getLocalName());
            writeAttributes(writer, transaction, attributes);
            writer.writeEndElement();
        }
    }
    // Heap and off-heap memory are alternatives: the else-if means heap wins if the
    // model somehow defines both. Each combines the shared MemoryResourceDefinition
    // attributes with its variant-specific ones.
    if (cache.hasDefined(HeapMemoryResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode memory = cache.get(HeapMemoryResourceDefinition.PATH.getKeyValuePair());
        Iterable<Attribute> attributes = new CompositeIterable<>(EnumSet.allOf(MemoryResourceDefinition.Attribute.class), EnumSet.allOf(HeapMemoryResourceDefinition.Attribute.class));
        if (hasDefined(memory, attributes)) {
            writer.writeStartElement(XMLElement.HEAP_MEMORY.getLocalName());
            writeAttributes(writer, memory, attributes);
            writer.writeEndElement();
        }
    } else if (cache.hasDefined(OffHeapMemoryResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode memory = cache.get(OffHeapMemoryResourceDefinition.PATH.getKeyValuePair());
        Iterable<Attribute> attributes = new CompositeIterable<>(EnumSet.allOf(MemoryResourceDefinition.Attribute.class), EnumSet.allOf(OffHeapMemoryResourceDefinition.Attribute.class));
        if (hasDefined(memory, attributes)) {
            writer.writeStartElement(XMLElement.OFF_HEAP_MEMORY.getLocalName());
            writeAttributes(writer, memory, attributes);
            writer.writeEndElement();
        }
    }
    if (cache.hasDefined(ExpirationResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode expiration = cache.get(ExpirationResourceDefinition.PATH.getKeyValuePair());
        Set<ExpirationResourceDefinition.Attribute> attributes = EnumSet.allOf(ExpirationResourceDefinition.Attribute.class);
        if (hasDefined(expiration, attributes)) {
            writer.writeStartElement(XMLElement.EXPIRATION.getLocalName());
            writeAttributes(writer, expiration, attributes);
            writer.writeEndElement();
        }
    }
    // Shared store attributes, minus PROPERTIES (store properties are emitted as nested
    // elements by writeStoreElements, not as attributes).
    Set<StoreResourceDefinition.Attribute> storeAttributes = EnumSet.complementOf(EnumSet.of(StoreResourceDefinition.Attribute.PROPERTIES));
    if (cache.hasDefined(CustomStoreResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode store = cache.get(CustomStoreResourceDefinition.PATH.getKeyValuePair());
        writer.writeStartElement(XMLElement.STORE.getLocalName());
        writeAttributes(writer, store, CustomStoreResourceDefinition.Attribute.class);
        // NOTE(review): emitting JDBCStoreResourceDefinition attributes for a *custom*
        // store looks like a copy/paste carry-over from the JDBC branch below — confirm
        // whether a custom store model can actually define these attributes.
        writeAttributes(writer, store, JDBCStoreResourceDefinition.Attribute.class);
        writeAttributes(writer, store, storeAttributes);
        writeAttributes(writer, store, StoreResourceDefinition.DeprecatedAttribute.class);
        writeStoreElements(writer, store);
        writer.writeEndElement();
    }
    if (cache.hasDefined(FileStoreResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode store = cache.get(FileStoreResourceDefinition.PATH.getKeyValuePair());
        writer.writeStartElement(XMLElement.FILE_STORE.getLocalName());
        writeAttributes(writer, store, FileStoreResourceDefinition.DeprecatedAttribute.class);
        writeAttributes(writer, store, storeAttributes);
        writeAttributes(writer, store, StoreResourceDefinition.DeprecatedAttribute.class);
        writeStoreElements(writer, store);
        writer.writeEndElement();
    }
    if (cache.hasDefined(JDBCStoreResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode store = cache.get(JDBCStoreResourceDefinition.PATH.getKeyValuePair());
        writer.writeStartElement(XMLElement.JDBC_STORE.getLocalName());
        writeAttributes(writer, store, JDBCStoreResourceDefinition.Attribute.class);
        writeAttributes(writer, store, storeAttributes);
        writeAttributes(writer, store, StoreResourceDefinition.DeprecatedAttribute.class);
        writeStoreElements(writer, store);
        // The JDBC store additionally serializes its string-keyed table definition.
        writeJDBCStoreTable(writer, XMLElement.TABLE, store, StringTableResourceDefinition.PATH, StringTableResourceDefinition.Attribute.PREFIX);
        writer.writeEndElement();
    }
    if (cache.hasDefined(RemoteStoreResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode store = cache.get(RemoteStoreResourceDefinition.PATH.getKeyValuePair());
        writer.writeStartElement(XMLElement.REMOTE_STORE.getLocalName());
        writeAttributes(writer, store, RemoteStoreResourceDefinition.Attribute.class);
        writeAttributes(writer, store, storeAttributes);
        writeAttributes(writer, store, StoreResourceDefinition.DeprecatedAttribute.class);
        writeStoreElements(writer, store);
        writer.writeEndElement();
    }
    if (cache.hasDefined(HotRodStoreResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode store = cache.get(HotRodStoreResourceDefinition.PATH.getKeyValuePair());
        writer.writeStartElement(XMLElement.HOTROD_STORE.getLocalName());
        writeAttributes(writer, store, HotRodStoreResourceDefinition.Attribute.class);
        writeAttributes(writer, store, storeAttributes);
        writeAttributes(writer, store, StoreResourceDefinition.DeprecatedAttribute.class);
        writeStoreElements(writer, store);
        writer.writeEndElement();
    }
    if (cache.hasDefined(PartitionHandlingResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode partitionHandling = cache.get(PartitionHandlingResourceDefinition.PATH.getKeyValuePair());
        EnumSet<PartitionHandlingResourceDefinition.Attribute> attributes = EnumSet.allOf(PartitionHandlingResourceDefinition.Attribute.class);
        if (hasDefined(partitionHandling, attributes)) {
            writer.writeStartElement(XMLElement.PARTITION_HANDLING.getLocalName());
            writeAttributes(writer, partitionHandling, attributes);
            writer.writeEndElement();
        }
    }
    if (cache.hasDefined(StateTransferResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode stateTransfer = cache.get(StateTransferResourceDefinition.PATH.getKeyValuePair());
        EnumSet<StateTransferResourceDefinition.Attribute> attributes = EnumSet.allOf(StateTransferResourceDefinition.Attribute.class);
        if (hasDefined(stateTransfer, attributes)) {
            writer.writeStartElement(XMLElement.STATE_TRANSFER.getLocalName());
            writeAttributes(writer, stateTransfer, attributes);
            writer.writeEndElement();
        }
    }
    // Backups: one <backup site="..."> element per configured site, each optionally
    // containing a <take-offline> element when any take-offline attribute is defined.
    if (cache.hasDefined(BackupsResourceDefinition.PATH.getKeyValuePair())) {
        ModelNode backups = cache.get(BackupsResourceDefinition.PATH.getKeyValuePair());
        if (backups.hasDefined(BackupResourceDefinition.WILDCARD_PATH.getKey())) {
            writer.writeStartElement(XMLElement.BACKUPS.getLocalName());
            for (Property property : backups.get(BackupResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
                writer.writeStartElement(XMLElement.BACKUP.getLocalName());
                writer.writeAttribute(XMLAttribute.SITE.getLocalName(), property.getName());
                ModelNode backup = property.getValue();
                writeAttributes(writer, backup, BackupResourceDefinition.Attribute.class);
                writeAttributes(writer, backup, BackupResourceDefinition.DeprecatedAttribute.class);
                EnumSet<BackupResourceDefinition.TakeOfflineAttribute> takeOfflineAttributes = EnumSet.allOf(BackupResourceDefinition.TakeOfflineAttribute.class);
                if (hasDefined(backup, takeOfflineAttributes)) {
                    writer.writeStartElement(XMLElement.TAKE_OFFLINE.getLocalName());
                    writeAttributes(writer, backup, takeOfflineAttributes);
                    writer.writeEndElement();
                }
                writer.writeEndElement();
            }
            writer.writeEndElement();
        }
    }
}
254613.5832115wildfly
/**
 * Activates a connection factory directly against a deployed resource adapter: resolves
 * the RA metadata, validates that the requested connection-factory interface is offered
 * by the RA, translates the activation properties into IronJacamar metadata (security,
 * pool, transaction support, config properties), and installs a
 * {@link ResourceAdapterActivatorService} as a child service bound to {@code bindInfo}.
 *
 * Fixes relative to the previous revision:
 * - StartExceptions thrown deliberately inside the try block (e.g. legacy-security
 *   rejections) are rethrown as-is instead of being wrapped in a second StartException,
 *   preserving the original message and cause chain.
 * - The dead {@code !cfInterface.equals(interfaceName)} clause was removed:
 *   {@code cfInterface} is only ever assigned a value equal to {@code interfaceName},
 *   so a null check is sufficient.
 *
 * @param context the MSC start context; its child target receives the activator service
 * @throws org.jboss.msc.service.StartException if the interface is not offered by the
 *         RA, a legacy security setting is used, or any activation step fails
 */
public void start(org.jboss.msc.service.StartContext context) throws org.jboss.msc.service.StartException {
    ROOT_LOGGER.debugf("started DirectConnectionFactoryActivatorService %s", context.getController().getName());
    String cfInterface = null;
    try {
        // Resolve the deployed resource adapter metadata for this service's RA id.
        Connector cmd = mdr.getValue().getResourceAdapter(raId);
        ResourceAdapter ra = cmd.getResourceadapter();
        // Confirm the requested connection-factory interface is among the RA's
        // outbound connection definitions.
        if (ra.getOutboundResourceadapter() != null) {
            for (ConnectionDefinition cd : ra.getOutboundResourceadapter().getConnectionDefinitions()) {
                if (cd.getConnectionFactoryInterface().getValue().equals(interfaceName))
                    cfInterface = cd.getConnectionFactoryInterface().getValue();
            }
        }
        // cfInterface is only assigned when it equals interfaceName, so null here means
        // the RA does not offer the requested interface.
        if (cfInterface == null) {
            throw ConnectorLogger.ROOT_LOGGER.invalidConnectionFactory(cfInterface, resourceAdapter, jndiName);
        }
        // Split activation properties into RA-level ("ra."-prefixed), MCF-level
        // ("mcf."-prefixed or unprefixed) and the special ironjacamar.security keys.
        Map<String, String> raConfigProperties = new HashMap<String, String>();
        Map<String, String> mcfConfigProperties = new HashMap<String, String>();
        String securitySetting = null;
        String securitySettingDomain = null;
        if (properties != null) {
            for (Map.Entry<String, String> prop : properties.entrySet()) {
                String key = prop.getKey();
                String value = prop.getValue();
                if (key.equals("ironjacamar.security")) {
                    securitySetting = value;
                } else if (key.equals("ironjacamar.security.elytron-authentication-context")) {
                    securitySettingDomain = value;
                } else if (key.equals("ironjacamar.security.domain")) {
                    // Legacy (PicketBox) security domains are no longer supported.
                    throw new StartException(SUBSYSTEM_RA_LOGGER.legacySecurityNotSupported());
                } else {
                    if (key.startsWith("ra.")) {
                        raConfigProperties.put(key.substring(3), value);
                    } else if (key.startsWith("mcf.")) {
                        mcfConfigProperties.put(key.substring(4), value);
                    } else {
                        mcfConfigProperties.put(key, value);
                    }
                }
            }
        }
        // Look up the managed connection factory class declared for this interface.
        String mcfClass = null;
        if (ra.getOutboundResourceadapter() != null) {
            for (ConnectionDefinition cd : ra.getOutboundResourceadapter().getConnectionDefinitions()) {
                if (cd.getConnectionFactoryInterface().getValue().equals(cfInterface))
                    mcfClass = cd.getManagedConnectionFactoryClass().getValue();
            }
        }
        // Build the security metadata. Only Elytron-backed "domain"/"domain-and-application"
        // settings are supported; blank or "application" settings imply legacy security.
        Security security = null;
        if (securitySetting != null) {
            if ("".equals(securitySetting) || ("application".equals(securitySetting))) {
                throw new StartException(SUBSYSTEM_RA_LOGGER.legacySecurityNotSupported());
            } else if ("domain".equals(securitySetting) && securitySettingDomain != null) {
                security = new SecurityImpl(securitySettingDomain, null, false);
            } else if ("domain-and-application".equals(securitySetting) && securitySettingDomain != null) {
                security = new SecurityImpl(null, securitySettingDomain, false);
            }
        }
        if (security == null) {
            SUBSYSTEM_RA_LOGGER.noSecurityDefined(jndiName);
        }
        // Choose the pool variant: XA pools for XATransaction support, plain otherwise.
        // Negative min/max sizes fall back to the IronJacamar defaults.
        Pool pool = null;
        Boolean isXA = Boolean.FALSE;
        if (transactionSupport == TransactionSupport.TransactionSupportLevel.XATransaction) {
            pool = new XaPoolImpl(minPoolSize < 0 ? Defaults.MIN_POOL_SIZE : minPoolSize, Defaults.INITIAL_POOL_SIZE, maxPoolSize < 0 ? Defaults.MAX_POOL_SIZE : maxPoolSize, Defaults.PREFILL, Defaults.USE_STRICT_MIN, Defaults.FLUSH_STRATEGY, null, Defaults.FAIR, Defaults.IS_SAME_RM_OVERRIDE, Defaults.INTERLEAVING, Defaults.PAD_XID, Defaults.WRAP_XA_RESOURCE, Defaults.NO_TX_SEPARATE_POOL);
            isXA = Boolean.TRUE;
        } else {
            pool = new PoolImpl(minPoolSize < 0 ? Defaults.MIN_POOL_SIZE : minPoolSize, Defaults.INITIAL_POOL_SIZE, maxPoolSize < 0 ? Defaults.MAX_POOL_SIZE : maxPoolSize, Defaults.PREFILL, Defaults.USE_STRICT_MIN, Defaults.FLUSH_STRATEGY, null, Defaults.FAIR);
        }
        TransactionSupportEnum transactionSupportValue = TransactionSupportEnum.NoTransaction;
        if (transactionSupport == TransactionSupport.TransactionSupportLevel.XATransaction) {
            transactionSupportValue = TransactionSupportEnum.XATransaction;
        } else if (transactionSupport == TransactionSupport.TransactionSupportLevel.LocalTransaction) {
            transactionSupportValue = TransactionSupportEnum.LocalTransaction;
        }
        org.jboss.jca.common.api.metadata.resourceadapter.ConnectionDefinition cd = new org.jboss.jca.common.metadata.resourceadapter.ConnectionDefinitionImpl(mcfConfigProperties, mcfClass, jndiName, poolName(cfInterface), Boolean.TRUE, Boolean.TRUE, Boolean.TRUE, Boolean.TRUE, Boolean.TRUE, Defaults.CONNECTABLE, Defaults.TRACKING, Defaults.MCP, Defaults.ENLISTMENT_TRACE, pool, null, null, security, null, isXA);
        Activation activation = new ActivationImpl(null, null, transactionSupportValue, Collections.singletonList(cd), Collections.<AdminObject>emptyList(), raConfigProperties, Collections.<String>emptyList(), null, null);
        // MSC service names may not contain ':' or '/', so sanitize the JNDI name.
        String serviceName = jndiName;
        serviceName = serviceName.replace(':', '_');
        serviceName = serviceName.replace('/', '_');
        ResourceAdapterActivatorService activator = new ResourceAdapterActivatorService(cmd, activation, module.getClassLoader(), serviceName);
        // The binder service is created elsewhere; this activator only supplies bindInfo.
        activator.setCreateBinderService(false);
        activator.setBindInfo(bindInfo);
        org.jboss.msc.service.ServiceTarget serviceTarget = context.getChildTarget();
        ServiceName activatorServiceName = ConnectorServices.RESOURCE_ADAPTER_ACTIVATOR_SERVICE.append(serviceName);
        org.jboss.msc.service.ServiceBuilder connectionFactoryServiceBuilder = serviceTarget.addService(activatorServiceName, activator).addDependency(ConnectorServices.IRONJACAMAR_MDR, AS7MetadataRepository.class, activator.getMdrInjector()).addDependency(ConnectorServices.RA_REPOSITORY_SERVICE, ResourceAdapterRepository.class, activator.getRaRepositoryInjector()).addDependency(ConnectorServices.MANAGEMENT_REPOSITORY_SERVICE, ManagementRepository.class, activator.getManagementRepositoryInjector()).addDependency(ConnectorServices.RESOURCE_ADAPTER_REGISTRY_SERVICE, ResourceAdapterDeploymentRegistry.class, activator.getRegistryInjector()).addDependency(ConnectorServices.CONNECTOR_CONFIG_SERVICE, JcaSubsystemConfiguration.class, activator.getConfigInjector()).addDependency(ConnectorServices.CCM_SERVICE, CachedConnectionManager.class, activator.getCcmInjector()).addDependency(ConnectorServices.getCachedCapabilityServiceName(ConnectorServices.TRANSACTION_INTEGRATION_CAPABILITY_NAME), TransactionIntegration.class, activator.getTxIntegrationInjector());
        connectionFactoryServiceBuilder.requires(ConnectorServices.getCachedCapabilityServiceName(NamingService.CAPABILITY_NAME));
        connectionFactoryServiceBuilder.requires(ConnectorServices.getCachedCapabilityServiceName(ConnectorServices.LOCAL_TRANSACTION_PROVIDER_CAPABILITY));
        connectionFactoryServiceBuilder.requires(ConnectorServices.BOOTSTRAP_CONTEXT_SERVICE.append("default"));
        connectionFactoryServiceBuilder.setInitialMode(org.jboss.msc.service.ServiceController.Mode.ACTIVE).install();
    } catch (StartException e) {
        // Deliberate StartExceptions (legacy security, invalid CF) propagate unchanged
        // instead of being wrapped in another StartException.
        throw e;
    } catch (Exception e) {
        throw new org.jboss.msc.service.StartException(e);
    }
}
252083.1134138wildfly
/**
 * Parses one {@code <connection-definition>} element from a version 3.0 resource-adapter
 * subsystem descriptor into an ADD operation ModelNode. Attributes are mapped to the
 * corresponding attribute definitions; the pool name defaults to the last JNDI-name
 * segment when not given explicitly. Child elements (config properties, security,
 * timeout, validation, pool/xa-pool, recovery) are delegated to the dedicated parsers.
 * The populated node is stored in {@code map} under the pool name when the closing
 * connection-definition tag is reached.
 *
 * @param reader    the StAX reader positioned on the connection-definition start element
 * @param map       out-param: pool name to connection-definition ADD operation
 * @param configMap out-param: pool name to its parsed config-property nodes
 * @param isXa      whether this definition belongs to an XA resource adapter; selects
 *                  which pool element ({@code xa-pool} vs {@code pool}) is legal
 * @throws XMLStreamException on malformed XML
 * @throws ParserException    if more than one pool element is declared
 * @throws ValidateException  propagated from delegated child parsers
 */
protected void parseConnectionDefinitions_3_0(final XMLExtendedStreamReader reader, final Map<String, ModelNode> map, final Map<String, HashMap<String, ModelNode>> configMap, final boolean isXa) throws XMLStreamException, ParserException, ValidateException {
    final ModelNode connectionDefinitionNode = new ModelNode();
    connectionDefinitionNode.get(OP).set(ADD);
    String poolName = null;
    String jndiName = null;
    int attributeSize = reader.getAttributeCount();
    boolean poolDefined = Boolean.FALSE;
    // Phase 1: consume the element's attributes.
    for (int i = 0; i < attributeSize; i++) {
        ConnectionDefinition.Attribute attribute = ConnectionDefinition.Attribute.forName(reader.getAttributeLocalName(i));
        String value = reader.getAttributeValue(i);
        switch(attribute) {
            case ENABLED:
                {
                    ENABLED.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case CONNECTABLE:
                {
                    CONNECTABLE.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case TRACKING:
                {
                    TRACKING.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case JNDI_NAME:
                {
                    // Kept in a local too: the pool-name fallback below derives from it.
                    jndiName = value;
                    JNDI_NAME.parseAndSetParameter(jndiName, connectionDefinitionNode, reader);
                    break;
                }
            case POOL_NAME:
                {
                    // Pool name is the map key, not a model attribute, so it is not
                    // written into connectionDefinitionNode here.
                    poolName = value;
                    break;
                }
            case USE_JAVA_CONTEXT:
                {
                    USE_JAVA_CONTEXT.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case USE_CCM:
                {
                    USE_CCM.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case SHARABLE:
                {
                    SHARABLE.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case ENLISTMENT:
                {
                    ENLISTMENT.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            case CLASS_NAME:
                {
                    CLASS_NAME.parseAndSetParameter(value, connectionDefinitionNode, reader);
                    break;
                }
            default:
                throw ParseUtils.unexpectedAttribute(reader, i);
        }
    }
    // Phase 2: default the pool name from the JNDI name — the segment after the last
    // '/' (or after the last ':' for names like "java:jboss/name" without a slash).
    // A JNDI name is mandatory when no pool name was supplied.
    if (poolName == null || poolName.trim().equals("")) {
        if (jndiName != null && jndiName.trim().length() != 0) {
            if (jndiName.contains("/")) {
                poolName = jndiName.substring(jndiName.lastIndexOf("/") + 1);
            } else {
                poolName = jndiName.substring(jndiName.lastIndexOf(":") + 1);
            }
        } else {
            throw ParseUtils.missingRequired(reader, EnumSet.of(ConnectionDefinition.Attribute.JNDI_NAME));
        }
    }
    // Phase 3: consume child elements until the matching end tag is reached.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (Activation.Tag.forName(reader.getLocalName()) == Activation.Tag.CONNECTION_DEFINITION) {
                        // Normal exit: register the parsed definition under its pool name.
                        map.put(poolName, connectionDefinitionNode);
                        return;
                    } else {
                        if (ConnectionDefinition.Tag.forName(reader.getLocalName()) == ConnectionDefinition.Tag.UNKNOWN) {
                            throw ParseUtils.unexpectedEndElement(reader);
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch(ConnectionDefinition.Tag.forName(reader.getLocalName())) {
                        case CONFIG_PROPERTY:
                            {
                                // Config properties accumulate per pool name in configMap.
                                if (!configMap.containsKey(poolName)) {
                                    configMap.put(poolName, new HashMap<String, ModelNode>(0));
                                }
                                parseConfigProperties(reader, configMap.get(poolName));
                                break;
                            }
                        case SECURITY:
                            {
                                parseSecuritySettings(reader, connectionDefinitionNode);
                                break;
                            }
                        case TIMEOUT:
                            {
                                parseTimeOut(reader, connectionDefinitionNode);
                                break;
                            }
                        case VALIDATION:
                            {
                                parseValidation(reader, connectionDefinitionNode);
                                break;
                            }
                        case XA_POOL:
                            {
                                // xa-pool is only legal for XA adapters, and at most one
                                // pool element (of either kind) is allowed.
                                if (!isXa) {
                                    throw ParseUtils.unexpectedElement(reader);
                                }
                                if (poolDefined) {
                                    throw new ParserException(bundle.multiplePools());
                                }
                                parseXaPool(reader, connectionDefinitionNode);
                                poolDefined = true;
                                break;
                            }
                        case POOL:
                            {
                                // Plain pool is only legal for non-XA adapters.
                                if (isXa) {
                                    throw ParseUtils.unexpectedElement(reader);
                                }
                                if (poolDefined) {
                                    throw new ParserException(bundle.multiplePools());
                                }
                                parsePool(reader, connectionDefinitionNode);
                                poolDefined = true;
                                break;
                            }
                        case RECOVERY:
                            {
                                parseRecovery(reader, connectionDefinitionNode);
                                break;
                            }
                        default:
                            throw ParseUtils.unexpectedElement(reader);
                    }
                    break;
                }
        }
    }
    // Document ended without the closing connection-definition tag.
    throw ParseUtils.unexpectedEndElement(reader);
}
251411.9561106wildfly
/**
 * Field-by-field equality for {@code ModifiableConnDef}. Two definitions are
 * equal when every attribute compared below (including the XA flag reported
 * by {@code isXa()}) matches, treating two {@code null}s as equal.
 *
 * @param obj the object to compare against; may be {@code null}
 * @return {@code true} if {@code obj} is a {@code ModifiableConnDef} whose
 *         attributes all equal this instance's
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (!(obj instanceof ModifiableConnDef)) {
        return false;
    }
    ModifiableConnDef other = (ModifiableConnDef) obj;
    // Short-circuits on the first differing attribute, exactly like the
    // expanded null-check chain this replaces.
    // NOTE(review): verify hashCode() covers the same attributes — it is not
    // visible in this chunk.
    return fieldEquals(className, other.className)
            && fieldEquals(configProperties, other.configProperties)
            && fieldEquals(enabled, other.enabled)
            && fieldEquals(jndiName, other.jndiName)
            && fieldEquals(pool, other.pool)
            && fieldEquals(poolName, other.poolName)
            && fieldEquals(recovery, other.recovery)
            && fieldEquals(security, other.security)
            && fieldEquals(timeOut, other.timeOut)
            && fieldEquals(useJavaContext, other.useJavaContext)
            && fieldEquals(useCcm, other.useCcm)
            && fieldEquals(validation, other.validation)
            && fieldEquals(isXa(), other.isXa())
            && fieldEquals(sharable, other.sharable)
            && fieldEquals(enlistment, other.enlistment)
            && fieldEquals(connectable, other.connectable)
            && fieldEquals(tracking, other.tracking)
            && fieldEquals(mcp, other.mcp)
            && fieldEquals(enlistmentTrace, other.enlistmentTrace);
}

// Null-safe equality: true when both references are null, or a.equals(b) otherwise.
private static boolean fieldEquals(Object a, Object b) {
    return a == null ? b == null : a.equals(b);
}
251259.5529178wildfly
/**
 * Installs the {@code EJBClientContextService} for a top-level deployment
 * unit, wiring in the remoting profile (either the named one from the EJB
 * client descriptor or an internally generated one built from the
 * descriptor's receiver/http configurations), cluster configuration, node
 * selectors and client-side interceptors.
 *
 * @param phaseContext the current deployment phase context
 * @throws DeploymentUnitProcessingingException never directly; descriptor
 *         validation errors surface via {@code checkDescriptorConfiguration}
 */
public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    // Subdeployments share the top-level deployment's client context.
    if (deploymentUnit.getParent() != null) {
        return;
    }
    final Module module = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.MODULE);
    if (module == null) {
        return;
    }
    CapabilityServiceSupport capabilityServiceSupport = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.CAPABILITY_SERVICE_SUPPORT);
    // Collect interceptors found on the deployment class path plus any
    // statically configured ones attached by the subsystem.
    final List<EJBClientInterceptor> deploymentEjbClientInterceptors = getClassPathInterceptors(module.getClassLoader());
    List<EJBClientInterceptor> staticEjbClientInterceptors = deploymentUnit.getAttachment(org.jboss.as.ejb3.subsystem.Attachments.STATIC_EJB_CLIENT_INTERCEPTORS);
    List<EJBClientInterceptor> ejbClientInterceptors = new ArrayList<>();
    if (deploymentEjbClientInterceptors != null) {
        ejbClientInterceptors.addAll(deploymentEjbClientInterceptors);
    }
    if (staticEjbClientInterceptors != null) {
        ejbClientInterceptors.addAll(staticEjbClientInterceptors);
    }
    // The list was constructed just above, so it can never be null; only the
    // emptiness check is meaningful (original also tested != null).
    final boolean interceptorsDefined = !ejbClientInterceptors.isEmpty();
    final EJBClientDescriptorMetaData ejbClientDescriptorMetaData = deploymentUnit.getAttachment(Attachments.EJB_CLIENT_METADATA);
    // Nothing to install when there is neither a descriptor nor any interceptor.
    if (ejbClientDescriptorMetaData == null && !interceptorsDefined) {
        return;
    }
    final ServiceName ejbClientContextServiceName = EJBClientContextService.DEPLOYMENT_BASE_SERVICE_NAME.append(deploymentUnit.getName());
    final ServiceTarget serviceTarget = phaseContext.getServiceTarget();
    final EJBClientContextService service = new EJBClientContextService();
    final ServiceBuilder<EJBClientContextService> serviceBuilder = serviceTarget.addService(ejbClientContextServiceName, service);
    if (appclient) {
        serviceBuilder.addDependency(EJBClientContextService.APP_CLIENT_URI_SERVICE_NAME, URI.class, service.getAppClientUri());
        serviceBuilder.addDependency(EJBClientContextService.APP_CLIENT_EJB_PROPERTIES_SERVICE_NAME, String.class, service.getAppClientEjbProperties());
    }
    serviceBuilder.addDependency(EJBClientConfiguratorService.SERVICE_NAME, EJBClientConfiguratorService.class, service.getConfiguratorServiceInjector());
    if (ejbClientDescriptorMetaData != null) {
        checkDescriptorConfiguration(ejbClientDescriptorMetaData);
        // Forwards the profile's local transport provider (when present) into
        // the context service, remembering whether anything was injected so
        // uninject stays balanced.
        final Injector<RemotingProfileService> profileServiceInjector = new Injector<RemotingProfileService>() {

            final Injector<EJBTransportProvider> injector = service.getLocalProviderInjector();

            boolean injected = false;

            public void inject(final RemotingProfileService value) throws InjectionException {
                final Supplier<EJBTransportProvider> transportSupplier = value.getLocalTransportProviderSupplier();
                final EJBTransportProvider provider = transportSupplier != null ? transportSupplier.get() : null;
                if (provider != null) {
                    injected = true;
                    injector.inject(provider);
                }
            }

            public void uninject() {
                if (injected) {
                    injected = false;
                    injector.uninject();
                }
            }
        };
        final String profile = ejbClientDescriptorMetaData.getProfile();
        final ServiceName profileServiceName;
        if (profile != null) {
            // A named profile: depend on the existing capability-provided service.
            profileServiceName = capabilityServiceSupport.getCapabilityServiceName(REMOTING_PROFILE_CAPABILITY_NAME, profile);
            serviceBuilder.addDependency(profileServiceName, RemotingProfileService.class, profileServiceInjector);
            serviceBuilder.addDependency(profileServiceName, RemotingProfileService.class, service.getProfileServiceInjector());
        } else {
            // No named profile: synthesize an internal RemotingProfileService
            // from the descriptor's receiver and http-connection configurations.
            profileServiceName = ejbClientContextServiceName.append(INTERNAL_REMOTING_PROFILE);
            final Map<String, RemotingProfileService.RemotingConnectionSpec> remotingConnectionMap = new HashMap<>();
            final List<RemotingProfileService.HttpConnectionSpec> httpConnections = new ArrayList<>();
            final ServiceBuilder<?> profileServiceBuilder = serviceTarget.addService(profileServiceName);
            final Consumer<RemotingProfileService> consumer = profileServiceBuilder.provides(profileServiceName);
            Supplier<EJBTransportProvider> localTransportProviderSupplier = null;
            if (ejbClientDescriptorMetaData.isLocalReceiverExcluded() != Boolean.TRUE) {
                // Pass-by-value is the default; only an explicit FALSE selects by-reference.
                final Boolean passByValue = ejbClientDescriptorMetaData.isLocalReceiverPassByValue();
                localTransportProviderSupplier = profileServiceBuilder.requires(passByValue == Boolean.FALSE ? LocalTransportProvider.BY_REFERENCE_SERVICE_NAME : LocalTransportProvider.BY_VALUE_SERVICE_NAME);
            }
            final Collection<EJBClientDescriptorMetaData.RemotingReceiverConfiguration> receiverConfigurations = ejbClientDescriptorMetaData.getRemotingReceiverConfigurations();
            for (EJBClientDescriptorMetaData.RemotingReceiverConfiguration receiverConfiguration : receiverConfigurations) {
                final String connectionRef = receiverConfiguration.getOutboundConnectionRef();
                final long connectTimeout = receiverConfiguration.getConnectionTimeout();
                final Properties channelCreationOptions = receiverConfiguration.getChannelCreationOptions();
                final OptionMap optionMap = getOptionMapFromProperties(channelCreationOptions, EJBClientDescriptorMetaDataProcessor.class.getClassLoader());
                final ServiceName internalServiceName = capabilityServiceSupport.getCapabilityServiceName(OUTBOUND_CONNECTION_CAPABILITY_NAME, connectionRef);
                final Supplier<OutboundConnection> supplier = profileServiceBuilder.requires(internalServiceName);
                final RemotingProfileService.RemotingConnectionSpec connectionSpec = new RemotingProfileService.RemotingConnectionSpec(connectionRef, supplier, optionMap, connectTimeout);
                remotingConnectionMap.put(connectionRef, connectionSpec);
            }
            for (EJBClientDescriptorMetaData.HttpConnectionConfiguration httpConfigurations : ejbClientDescriptorMetaData.getHttpConnectionConfigurations()) {
                final String uri = httpConfigurations.getUri();
                RemotingProfileService.HttpConnectionSpec httpConnectionSpec = new RemotingProfileService.HttpConnectionSpec(uri);
                httpConnections.add(httpConnectionSpec);
            }
            final RemotingProfileService profileService = new RemotingProfileService(consumer, localTransportProviderSupplier, Collections.emptyList(), remotingConnectionMap, httpConnections);
            profileServiceBuilder.setInstance(profileService);
            profileServiceBuilder.install();
            serviceBuilder.addDependency(profileServiceName, RemotingProfileService.class, profileServiceInjector);
            serviceBuilder.addDependency(profileServiceName, RemotingProfileService.class, service.getProfileServiceInjector());
        }
        // Optional custom deployment node selector, loaded from the deployment's own class loader.
        final String deploymentNodeSelectorClassName = ejbClientDescriptorMetaData.getDeploymentNodeSelector();
        if (deploymentNodeSelectorClassName != null) {
            final DeploymentNodeSelector deploymentNodeSelector;
            try {
                deploymentNodeSelector = module.getClassLoader().loadClass(deploymentNodeSelectorClassName).asSubclass(DeploymentNodeSelector.class).getConstructor().newInstance();
            } catch (Exception e) {
                throw EjbLogger.ROOT_LOGGER.failedToCreateDeploymentNodeSelector(e, deploymentNodeSelectorClassName);
            }
            service.setDeploymentNodeSelector(deploymentNodeSelector);
        }
        final long invocationTimeout = ejbClientDescriptorMetaData.getInvocationTimeout();
        service.setInvocationTimeout(invocationTimeout);
        final int defaultCompression = ejbClientDescriptorMetaData.getDefaultCompression();
        service.setDefaultCompression(defaultCompression);
        // Build client-side cluster definitions plus an authentication context
        // holding one match rule per cluster and per node.
        final Collection<EJBClientDescriptorMetaData.ClusterConfig> clusterConfigs = ejbClientDescriptorMetaData.getClusterConfigs();
        if (!clusterConfigs.isEmpty()) {
            final List<EJBClientCluster> clientClusters = new ArrayList<>(clusterConfigs.size());
            AuthenticationContext clustersAuthenticationContext = AuthenticationContext.empty();
            for (EJBClientDescriptorMetaData.ClusterConfig clusterConfig : clusterConfigs) {
                MatchRule defaultRule = MatchRule.ALL.matchAbstractType("ejb", "jboss");
                AuthenticationConfiguration defaultAuthenticationConfiguration = AuthenticationConfiguration.empty();
                final EJBClientCluster.Builder clientClusterBuilder = new EJBClientCluster.Builder();
                final String clusterName = clusterConfig.getClusterName();
                clientClusterBuilder.setName(clusterName);
                defaultRule = defaultRule.matchProtocol("cluster");
                defaultRule = defaultRule.matchUrnName(clusterName);
                final long maxAllowedConnectedNodes = clusterConfig.getMaxAllowedConnectedNodes();
                clientClusterBuilder.setMaximumConnectedNodes(maxAllowedConnectedNodes);
                final String clusterNodeSelectorClassName = clusterConfig.getNodeSelector();
                if (clusterNodeSelectorClassName != null) {
                    final ClusterNodeSelector clusterNodeSelector;
                    try {
                        clusterNodeSelector = module.getClassLoader().loadClass(clusterNodeSelectorClassName).asSubclass(ClusterNodeSelector.class).getConstructor().newInstance();
                    } catch (Exception e) {
                        throw EjbLogger.ROOT_LOGGER.failureDuringLoadOfClusterNodeSelector(clusterNodeSelectorClassName, clusterName, e);
                    }
                    clientClusterBuilder.setClusterNodeSelector(clusterNodeSelector);
                }
                // NOTE(review): cluster-level channel-creation options were
                // previously parsed into an OptionMap here but never applied
                // anywhere; the dead locals were removed. Confirm whether they
                // should instead be passed to the cluster builder.
                final Properties clusterConnectionOptions = clusterConfig.getConnectionOptions();
                final OptionMap clusterConnectionOptionMap = getOptionMapFromProperties(clusterConnectionOptions, EJBClientDescriptorMetaDataProcessor.class.getClassLoader());
                final long clusterConnectTimeout = clusterConfig.getConnectTimeout();
                clientClusterBuilder.setConnectTimeoutMilliseconds(clusterConnectTimeout);
                if (clusterConnectionOptionMap != null) {
                    RemotingOptions.mergeOptionsIntoAuthenticationConfiguration(clusterConnectionOptionMap, defaultAuthenticationConfiguration);
                }
                clustersAuthenticationContext = clustersAuthenticationContext.with(defaultRule, defaultAuthenticationConfiguration);
                final Collection<EJBClientDescriptorMetaData.ClusterNodeConfig> clusterNodeConfigs = clusterConfig.getClusterNodeConfigs();
                for (EJBClientDescriptorMetaData.ClusterNodeConfig clusterNodeConfig : clusterNodeConfigs) {
                    MatchRule nodeRule = MatchRule.ALL.matchAbstractType("ejb", "jboss");
                    AuthenticationConfiguration nodeAuthenticationConfiguration = AuthenticationConfiguration.empty();
                    final String nodeName = clusterNodeConfig.getNodeName();
                    nodeRule = nodeRule.matchProtocol("node");
                    nodeRule = nodeRule.matchUrnName(nodeName);
                    // NOTE(review): the node-level channel-creation options
                    // getter result was never used; dead local removed.
                    final Properties connectionOptions = clusterNodeConfig.getConnectionOptions();
                    final OptionMap connectionOptionMap = getOptionMapFromProperties(connectionOptions, EJBClientDescriptorMetaDataProcessor.class.getClassLoader());
                    final long connectTimeout = clusterNodeConfig.getConnectTimeout();
                    if (connectionOptionMap != null) {
                        RemotingOptions.mergeOptionsIntoAuthenticationConfiguration(connectionOptionMap, nodeAuthenticationConfiguration);
                    }
                    // Inserted at index 0 — presumably so node-specific rules
                    // match before the cluster-wide rule; verify against
                    // AuthenticationContext.with(int, ...) semantics.
                    clustersAuthenticationContext = clustersAuthenticationContext.with(0, nodeRule, nodeAuthenticationConfiguration);
                }
                final EJBClientCluster clientCluster = clientClusterBuilder.build();
                clientClusters.add(clientCluster);
            }
            service.setClientClusters(clientClusters);
            service.setClustersAuthenticationContext(clustersAuthenticationContext);
        }
        deploymentUnit.putAttachment(EjbDeploymentAttachmentKeys.EJB_REMOTING_PROFILE_SERVICE_NAME, profileServiceName);
    } else {
        // No descriptor: fall back to the default local transport (server only).
        if (!appclient) {
            serviceBuilder.addDependency(LocalTransportProvider.DEFAULT_LOCAL_TRANSPORT_PROVIDER_SERVICE_NAME, EJBTransportProvider.class, service.getLocalProviderInjector());
        }
    }
    if (interceptorsDefined) {
        service.setClientInterceptors(ejbClientInterceptors);
    }
    serviceBuilder.install();
    EjbLogger.DEPLOYMENT_LOGGER.debugf("Deployment unit %s will use %s as the EJB client context service", deploymentUnit, ejbClientContextServiceName);
    phaseContext.addDeploymentDependency(ejbClientContextServiceName, EjbDeploymentAttachmentKeys.EJB_CLIENT_CONTEXT_SERVICE);
    deploymentUnit.putAttachment(EjbDeploymentAttachmentKeys.EJB_CLIENT_CONTEXT_SERVICE_NAME, ejbClientContextServiceName);
}
262234.0931132cassandra
/**
 * Scrubs the sstable's data file partition by partition, appending every
 * readable partition to {@code writer}. When the index file is usable it is
 * consulted to cross-check keys/positions and to recover after a corrupt
 * read; when the index itself proves corrupt it is closed and nulled, and
 * scrubbing continues from the data file alone.
 */
public void scrubInternal(SSTableRewriter writer) {
    // A trustworthy index must report data position 0 for its first entry;
    // otherwise drop the index and scrub without it.
    if (indexAvailable() && indexIterator.dataPosition() != 0) {
        outputHandler.warn("First position reported by index should be 0, was " + indexIterator.dataPosition() + ", continuing without index.");
        indexIterator.close();
        indexIterator = null;
    }
    DecoratedKey prevKey = null;
    while (!dataFile.isEOF()) {
        if (scrubInfo.isStopRequested())
            throw new CompactionInterruptedException(scrubInfo.getCompactionInfo());
        long dataStart = dataFile.getFilePointer();
        outputHandler.debug("Reading row at %d", dataStart);
        DecoratedKey key = null;
        Throwable keyReadError = null;
        // Attempt to read the partition key from the data file; a failure is
        // remembered (keyReadError) and handled below, possibly by recovering
        // the key from the index.
        try {
            ByteBuffer raw = ByteBufferUtil.readWithShortLength(dataFile);
            if (!isIndex)
                partitionKeyType.validate(raw);
            key = sstable.decorateKey(raw);
        } catch (Throwable th) {
            keyReadError = th;
            throwIfFatal(th);
        }
        // Pull the expected key/position/size for this partition from the
        // index, if it is still available. -1 means "unknown".
        long dataStartFromIndex = -1;
        long dataSizeFromIndex = -1;
        ByteBuffer currentIndexKey = null;
        if (indexAvailable()) {
            currentIndexKey = indexIterator.key();
            dataStartFromIndex = indexIterator.dataPosition();
            if (!indexIterator.isExhausted()) {
                try {
                    indexIterator.advance();
                    if (!indexIterator.isExhausted())
                        dataSizeFromIndex = indexIterator.dataPosition() - dataStartFromIndex;
                } catch (Throwable th) {
                    // The index is corrupt: give up on it for the rest of the
                    // scrub and forget anything it told us about this partition.
                    throwIfFatal(th);
                    outputHandler.warn(th, "Failed to advance to the next index position. Index is corrupted. " + "Continuing without the index. Last position read is %d.", indexIterator.dataPosition());
                    indexIterator.close();
                    indexIterator = null;
                    currentIndexKey = null;
                    dataStartFromIndex = -1;
                    dataSizeFromIndex = -1;
                }
            }
        }
        String keyName = key == null ? "(unreadable key)" : keyString(key);
        outputHandler.debug("partition %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSizeFromIndex));
        try {
            // Sanity-check the data-file read against the index before appending.
            if (key == null)
                throw new IOError(new IOException("Unable to read partition key from data file", keyReadError));
            if (currentIndexKey != null && !key.getKey().equals(currentIndexKey)) {
                throw new IOError(new IOException(String.format("Key from data file (%s) does not match key from index file (%s)", ByteBufferUtil.bytesToHex(key.getKey()), ByteBufferUtil.bytesToHex(currentIndexKey))));
            }
            if (indexIterator != null && dataSizeFromIndex > dataFile.length())
                throw new IOError(new IOException("Impossible partition size (greater than file length): " + dataSizeFromIndex));
            if (indexIterator != null && dataStart != dataStartFromIndex)
                outputHandler.warn("Data file partition position %d differs from index file row position %d", dataStart, dataStartFromIndex);
            if (tryAppend(prevKey, key, writer))
                prevKey = key;
        } catch (Throwable th) {
            throwIfFatal(th);
            outputHandler.warn(th, "Error reading partition %s (stacktrace follows):", keyName);
            if (currentIndexKey != null && (key == null || !key.getKey().equals(currentIndexKey) || dataStart != dataStartFromIndex)) {
                // The index disagrees with what we read: retry using the
                // index's key and position (skip the 2-byte length prefix plus
                // the serialized key to land on the partition body).
                long rowStartFromIndex = dataStartFromIndex + TypeSizes.SHORT_SIZE + currentIndexKey.remaining();
                outputHandler.output("Retrying from partition index; data is %s bytes starting at %s", dataSizeFromIndex, rowStartFromIndex);
                key = sstable.decorateKey(currentIndexKey);
                try {
                    if (!isIndex)
                        partitionKeyType.validate(key.getKey());
                    dataFile.seek(rowStartFromIndex);
                    if (tryAppend(prevKey, key, writer))
                        prevKey = key;
                } catch (Throwable th2) {
                    // Retry failed as well: count the partition as bad and
                    // move on to the next one (or stop if we cannot seek).
                    throwIfFatal(th2);
                    throwIfCannotContinue(key, th2);
                    outputHandler.warn(th2, "Retry failed too. Skipping to next partition (retry's stacktrace follows)");
                    badPartitions++;
                    if (!seekToNextPartition())
                        break;
                }
            } else {
                // No useful index information to retry with: skip the
                // partition if the index can position us, otherwise abort.
                throwIfCannotContinue(key, th);
                badPartitions++;
                if (indexIterator != null) {
                    outputHandler.warn("Partition starting at position %d is unreadable; skipping to next", dataStart);
                    if (!seekToNextPartition())
                        break;
                } else {
                    outputHandler.warn("Unrecoverable error while scrubbing %s." + "Scrubbing cannot continue. The sstable will be marked for deletion. " + "You can attempt manual recovery from the pre-scrub snapshot. " + "You can also run nodetool repair to transfer the data from a healthy replica, if any.", sstable);
                    break;
                }
            }
        }
    }
}
265276.771146cassandra
/**
 * Exercises how TTL-bearing statements interact with conditional (IF /
 * IF NOT EXISTS) statements inside a single batch, against a table with a
 * three-component clustering key. The 6-second sleeps let 5-second TTLs
 * expire between assertions.
 */
public void testBatchTTLConditionalInteraction() throws Throwable {
    createTable(String.format("CREATE TABLE %s.clustering (\n" + "  id int,\n" + "  clustering1 int,\n" + "  clustering2 int,\n" + "  clustering3 int,\n" + "  val int, \n" + " PRIMARY KEY(id, clustering1, clustering2, clustering3)" + ")", KEYSPACE));
    execute("DELETE FROM " + KEYSPACE + ".clustering WHERE id=1");
    // Statement templates; each %s placeholder is filled via String.format below.
    String clusteringInsert = "INSERT INTO " + KEYSPACE + ".clustering(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s); ";
    String clusteringTTLInsert = "INSERT INTO " + KEYSPACE + ".clustering(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s) USING TTL %s; ";
    String clusteringConditionalInsert = "INSERT INTO " + KEYSPACE + ".clustering(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s) IF NOT EXISTS; ";
    String clusteringConditionalTTLInsert = "INSERT INTO " + KEYSPACE + ".clustering(id, clustering1, clustering2, clustering3, val) VALUES(%s, %s, %s, %s, %s)  IF NOT EXISTS USING TTL %s; ";
    String clusteringUpdate = "UPDATE " + KEYSPACE + ".clustering SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s ;";
    String clusteringTTLUpdate = "UPDATE " + KEYSPACE + ".clustering USING TTL %s SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s ;";
    String clusteringConditionalUpdate = "UPDATE " + KEYSPACE + ".clustering SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF val=%s ;";
    String clusteringConditionalTTLUpdate = "UPDATE " + KEYSPACE + ".clustering USING TTL %s SET val=%s WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF val=%s ;";
    String clusteringDelete = "DELETE FROM " + KEYSPACE + ".clustering WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s ;";
    String clusteringRangeDelete = "DELETE FROM " + KEYSPACE + ".clustering WHERE id=%s AND clustering1=%s ;";
    String clusteringConditionalDelete = "DELETE FROM " + KEYSPACE + ".clustering WHERE id=%s AND clustering1=%s AND clustering2=%s AND clustering3=%s IF val=%s ; ";
    // Plain insert inside a batch.
    execute("BEGIN BATCH " + String.format(clusteringInsert, 1, 1, 1, 1, 1) + " APPLY BATCH");
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 1, 1, 1, 1));
    // Unconditional insert plus a conditional update whose condition holds.
    String cmd2 = "BEGIN BATCH "
            + String.format(clusteringInsert, 1, 1, 1, 2, 2)
            + String.format(clusteringConditionalUpdate, 11, 1, 1, 1, 1, 1)
            + "APPLY BATCH ";
    execute(cmd2);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 1, 1, 1, 11), row(1, 1, 1, 2, 2));
    // Insert + conditional update + single-row delete in one batch.
    String cmd3 = "BEGIN BATCH "
            + String.format(clusteringInsert, 1, 1, 2, 3, 23)
            + String.format(clusteringConditionalUpdate, 22, 1, 1, 1, 2, 2)
            + String.format(clusteringDelete, 1, 1, 1, 1)
            + "APPLY BATCH ";
    execute(cmd3);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 1, 1, 2, 22), row(1, 1, 2, 3, 23));
    // Insert a new clustering prefix alongside a conditional update.
    String cmd4 = "BEGIN BATCH "
            + String.format(clusteringInsert, 1, 2, 3, 4, 1234)
            + String.format(clusteringConditionalUpdate, 234, 1, 1, 1, 2, 22)
            + "APPLY BATCH ";
    execute(cmd4);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 1, 1, 2, 234), row(1, 1, 2, 3, 23), row(1, 2, 3, 4, 1234));
    // Range delete combined with a conditional update.
    String cmd5 = "BEGIN BATCH "
            + String.format(clusteringRangeDelete, 1, 2)
            + String.format(clusteringConditionalUpdate, 1234, 1, 1, 1, 2, 234)
            + "APPLY BATCH ";
    execute(cmd5);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 1, 1, 2, 1234), row(1, 1, 2, 3, 23));
    // Unconditional update combined with a conditional update.
    String cmd6 = "BEGIN BATCH "
            + String.format(clusteringUpdate, 345, 1, 3, 4, 5)
            + String.format(clusteringConditionalUpdate, 1, 1, 1, 1, 2, 1234)
            + "APPLY BATCH ";
    execute(cmd6);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 1, 1, 2, 1), row(1, 1, 2, 3, 23), row(1, 3, 4, 5, 345));
    // Delete plus a conditional update whose condition fails: batch is rejected.
    String cmd7 = "BEGIN BATCH "
            + String.format(clusteringDelete, 1, 3, 4, 5)
            + String.format(clusteringConditionalUpdate, 2300, 1, 1, 2, 3, 1)
            + "APPLY BATCH ";
    execute(cmd7);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 1, 1, 2, 1), row(1, 1, 2, 3, 23), row(1, 3, 4, 5, 345));
    // Conditional delete plus range delete plus insert.
    String cmd8 = "BEGIN BATCH "
            + String.format(clusteringConditionalDelete, 1, 3, 4, 5, 345)
            + String.format(clusteringRangeDelete, 1, 1)
            + String.format(clusteringInsert, 1, 2, 3, 4, 5)
            + "APPLY BATCH ";
    execute(cmd8);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 2, 3, 4, 5));
    // Conditional insert plus unconditional delete.
    String cmd9 = "BEGIN BATCH "
            + String.format(clusteringConditionalInsert, 1, 3, 4, 5, 345)
            + String.format(clusteringDelete, 1, 2, 3, 4)
            + "APPLY BATCH ";
    execute(cmd9);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 3, 4, 5, 345));
    // TTL insert (5 s) plus conditional TTL update (10 s): both visible now,
    // only the longer-lived row survives the sleep.
    String cmd10 = "BEGIN BATCH "
            + String.format(clusteringTTLInsert, 1, 2, 3, 4, 5, 5)
            + String.format(clusteringConditionalTTLUpdate, 10, 5, 1, 3, 4, 5, 345)
            + "APPLY BATCH ";
    execute(cmd10);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 2, 3, 4, 5), row(1, 3, 4, 5, 5));
    Thread.sleep(6000);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 3, 4, 5, 5));
    // Conditional TTL insert plus plain insert; the TTL'd value expires, the
    // plain row remains.
    String cmd11 = "BEGIN BATCH "
            + String.format(clusteringConditionalTTLInsert, 1, 2, 3, 4, 5, 5)
            + String.format(clusteringInsert, 1, 4, 5, 6, 7)
            + "APPLY BATCH ";
    execute(cmd11);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 2, 3, 4, 5), row(1, 3, 4, 5, 5), row(1, 4, 5, 6, 7));
    Thread.sleep(6000);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 3, 4, 5, null), row(1, 4, 5, 6, 7));
    // Conditional TTL update against a null val plus a TTL update; both values
    // expire after the sleep.
    String cmd12 = "BEGIN BATCH "
            + String.format(clusteringConditionalTTLUpdate, 5, 5, 1, 3, 4, 5, null)
            + String.format(clusteringTTLUpdate, 5, 8, 1, 4, 5, 6)
            + "APPLY BATCH ";
    execute(cmd12);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 3, 4, 5, 5), row(1, 4, 5, 6, 8));
    Thread.sleep(6000);
    assertRows(execute("SELECT * FROM " + KEYSPACE + ".clustering WHERE id=1"), row(1, 3, 4, 5, null), row(1, 4, 5, 6, null));
}
263019.3816139elasticsearch
/**
 * Builds a fully randomized {@link PainlessContextInfo} for serialization round-trip
 * testing: random classes (each with random constructors, static methods, methods,
 * static fields, and fields), plus random imported methods, class bindings, and
 * instance bindings.
 */
protected PainlessContextInfo createTestInstance() {
    int classesSize = randomIntBetween(20, 100);
    List<PainlessContextClassInfo> classes = new ArrayList<>(classesSize);
    for (int clazz = 0; clazz < classesSize; ++clazz) {
        int constructorsSize = randomInt(4);
        List<PainlessContextConstructorInfo> constructors = new ArrayList<>(constructorsSize);
        for (int constructor = 0; constructor < constructorsSize; ++constructor) {
            List<String> parameters = randomParameterNames(randomInt(12));
            constructors.add(new PainlessContextConstructorInfo(randomAlphaOfLength(randomIntBetween(4, 10)), parameters));
        }
        int staticMethodsSize = randomInt(4);
        List<PainlessContextMethodInfo> staticMethods = new ArrayList<>(staticMethodsSize);
        for (int staticMethod = 0; staticMethod < staticMethodsSize; ++staticMethod) {
            staticMethods.add(randomMethodInfo());
        }
        int methodsSize = randomInt(10);
        List<PainlessContextMethodInfo> methods = new ArrayList<>(methodsSize);
        for (int method = 0; method < methodsSize; ++method) {
            methods.add(randomMethodInfo());
        }
        int staticFieldsSize = randomInt(10);
        List<PainlessContextFieldInfo> staticFields = new ArrayList<>(staticFieldsSize);
        for (int staticField = 0; staticField < staticFieldsSize; ++staticField) {
            staticFields.add(randomFieldInfo());
        }
        int fieldsSize = randomInt(4);
        List<PainlessContextFieldInfo> fields = new ArrayList<>(fieldsSize);
        for (int field = 0; field < fieldsSize; ++field) {
            fields.add(randomFieldInfo());
        }
        classes.add(new PainlessContextClassInfo(randomAlphaOfLength(randomIntBetween(3, 200)), randomBoolean(), constructors, staticMethods, methods, fields, staticFields));
    }
    int importedMethodsSize = randomInt(4);
    List<PainlessContextMethodInfo> importedMethods = new ArrayList<>(importedMethodsSize);
    for (int importedMethod = 0; importedMethod < importedMethodsSize; ++importedMethod) {
        importedMethods.add(randomMethodInfo());
    }
    int classBindingsSize = randomInt(3);
    List<PainlessContextClassBindingInfo> classBindings = new ArrayList<>(classBindingsSize);
    for (int classBinding = 0; classBinding < classBindingsSize; ++classBinding) {
        int parameterSize = randomIntBetween(2, 5);
        // readOnly stays strictly between 0 and parameterSize, so there is always at
        // least one read-only and one regular parameter.
        int readOnly = randomIntBetween(1, parameterSize - 1);
        List<String> parameters = randomParameterNames(parameterSize);
        classBindings.add(new PainlessContextClassBindingInfo(randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)), readOnly, parameters));
    }
    int instanceBindingsSize = randomInt(3);
    // Fixed copy-paste bug: this list was previously presized with classBindingsSize.
    List<PainlessContextInstanceBindingInfo> instanceBindings = new ArrayList<>(instanceBindingsSize);
    for (int instanceBinding = 0; instanceBinding < instanceBindingsSize; ++instanceBinding) {
        instanceBindings.add(new PainlessContextInstanceBindingInfo(randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)), randomParameterNames(randomInt(12))));
    }
    return new PainlessContextInfo(randomAlphaOfLength(20), classes, importedMethods, classBindings, instanceBindings);
}

/** Generates {@code count} random alphabetic parameter names, each 1-20 chars long. */
private List<String> randomParameterNames(int count) {
    List<String> parameters = new ArrayList<>(count);
    for (int parameter = 0; parameter < count; ++parameter) {
        parameters.add(randomAlphaOfLengthBetween(1, 20));
    }
    return parameters;
}

/** Builds a method info from three random identifier strings and a random parameter list. */
private PainlessContextMethodInfo randomMethodInfo() {
    return new PainlessContextMethodInfo(randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)), randomParameterNames(randomInt(12)));
}

/** Builds a field info from three random identifier strings. */
private PainlessContextFieldInfo randomFieldInfo() {
    return new PainlessContextFieldInfo(randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)), randomAlphaOfLength(randomIntBetween(4, 10)));
}
263495.3220125elasticsearch
/**
 * Indexes the same sentence into ten documents under a randomly chosen term-vector
 * storage configuration (none / vectors / +positions / +offsets / +both), then
 * verifies the term vectors API returns exactly the stored data: terms, frequencies,
 * and — only when both stored AND requested — positions and offsets (otherwise -1).
 */
public void testRandomSingleTermVectors() throws IOException {
    FieldType ft = new FieldType();
    // Pick one of the five legal term-vector storage combinations.
    int config = randomInt(4);
    boolean storePositions = false;
    boolean storeOffsets = false;
    boolean storeTermVectors = false;
    switch(config) {
        case 0 ->
            {
            }
        case 1 ->
            {
                storeTermVectors = true;
            }
        case 2 ->
            {
                storeTermVectors = true;
                storePositions = true;
            }
        case 3 ->
            {
                storeTermVectors = true;
                storeOffsets = true;
            }
        case 4 ->
            {
                storeTermVectors = true;
                storePositions = true;
                storeOffsets = true;
            }
        default ->
            throw new IllegalArgumentException("Unsupported option: " + config);
    }
    ft.setStoreTermVectors(storeTermVectors);
    ft.setStoreTermVectorOffsets(storeOffsets);
    ft.setStoreTermVectorPositions(storePositions);
    String optionString = termVectorOptionsToString(ft);
    // Map the field as text with the chosen term_vector option and a simple
    // standard+lowercase analyzer so token positions/offsets are predictable.
    XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties").startObject("field").field("type", "text").field("term_vector", optionString).field("analyzer", "tv_test").endObject().endObject().endObject().endObject();
    assertAcked(prepareCreate("test").setMapping(mapping).setSettings(Settings.builder().put("index.analysis.analyzer.tv_test.tokenizer", "standard").putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
    for (int i = 0; i < 10; i++) {
        prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()).get();
        refresh();
    }
    // Expected per-term data for "the quick brown fox jumps over the lazy dog",
    // terms listed in lexicographic order ("the" appears twice: positions 0 and 6).
    String[] values = { "brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the" };
    int[] freq = { 1, 1, 1, 1, 1, 1, 1, 2 };
    int[][] pos = { { 2 }, { 8 }, { 3 }, { 4 }, { 7 }, { 5 }, { 1 }, { 0, 6 } };
    int[][] startOffset = { { 10 }, { 40 }, { 16 }, { 20 }, { 35 }, { 26 }, { 4 }, { 0, 31 } };
    int[][] endOffset = { { 15 }, { 43 }, { 19 }, { 25 }, { 39 }, { 30 }, { 9 }, { 3, 34 } };
    // What the client asks for is randomized independently of what was stored.
    boolean isOffsetRequested = randomBoolean();
    boolean isPositionsRequested = randomBoolean();
    String infoString = createInfoString(isPositionsRequested, isOffsetRequested, optionString);
    for (int i = 0; i < 10; i++) {
        TermVectorsRequestBuilder resp = client().prepareTermVectors("test", Integer.toString(i)).setOffsets(isOffsetRequested).setPositions(isPositionsRequested).setSelectedFields();
        TermVectorsResponse response = resp.get();
        assertThat(infoString + "doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
        Fields fields = response.getFields();
        // Exactly one field carries vectors when storage was enabled, none otherwise.
        assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0));
        if (ft.storeTermVectors()) {
            Terms terms = fields.terms("field");
            assertThat(terms.size(), equalTo(8L));
            TermsEnum iterator = terms.iterator();
            for (int j = 0; j < values.length; j++) {
                String string = values[j];
                BytesRef next = iterator.next();
                assertThat(infoString, next, notNullValue());
                assertThat(infoString + "expected " + string, string, equalTo(next.utf8ToString()));
                PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL);
                // The term-vector "index" contains a single virtual document: doc 0.
                assertThat(infoString, docsAndPositions.nextDoc(), equalTo(0));
                assertThat(infoString, freq[j], equalTo(docsAndPositions.freq()));
                int[] termPos = pos[j];
                int[] termStartOffset = startOffset[j];
                int[] termEndOffset = endOffset[j];
                if (isPositionsRequested && storePositions) {
                    assertThat(infoString, termPos.length, equalTo(freq[j]));
                }
                if (isOffsetRequested && storeOffsets) {
                    assertThat(termStartOffset.length, equalTo(freq[j]));
                    assertThat(termEndOffset.length, equalTo(freq[j]));
                }
                for (int k = 0; k < freq[j]; k++) {
                    int nextPosition = docsAndPositions.nextPosition();
                    // Data is only returned when it was both stored and requested;
                    // otherwise the API reports -1 / no payload.
                    if (isPositionsRequested && storePositions) {
                        assertThat(infoString + "positions for term: " + string, nextPosition, equalTo(termPos[k]));
                    } else {
                        assertThat(infoString + "positions for term: ", nextPosition, equalTo(-1));
                    }
                    assertNull(infoString + "payloads for term: " + string, docsAndPositions.getPayload());
                    if (isOffsetRequested && storeOffsets) {
                        assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
                        assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
                    } else {
                        assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(), equalTo(-1));
                        assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(-1));
                    }
                }
            }
            // All 8 expected terms were consumed; the enum must be exhausted.
            assertThat(iterator.next(), nullValue());
        }
    }
}
263204.712142elasticsearch
/**
 * Verifies that {@code multi_match} queries are score-equivalent to their manually
 * expanded counterparts: BOOLEAN type vs {@code dis_max}/{@code bool} over per-field
 * match queries, PHRASE_PREFIX vs per-field {@code match_phrase_prefix}, and PHRASE vs
 * per-field {@code match_phrase}. Each comparison sorts by score then id and asserts
 * the two result sets match via {@code assertEquivalent}.
 */
public void testEquivalence() {
    // Count all docs so each comparison can request every hit in one page.
    var response = prepareSearch("test").setSize(0).setQuery(matchAllQuery()).get();
    final int numDocs;
    try {
        numDocs = (int) response.getHits().getTotalHits().value;
    } finally {
        // Always release the response, even if reading the hit count throws.
        response.decRef();
    }
    int numIters = scaledRandomIntBetween(5, 10);
    for (int i = 0; i < numIters; i++) {
        {
            // BOOLEAN multi_match with OR operator == dis_max over per-field match queries.
            // Field list is randomized between explicit names and wildcard patterns.
            MatchQueryParser.Type type = MatchQueryParser.Type.BOOLEAN;
            MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("marvel hero captain america", "*_name", randomBoolean() ? "category" : "categ*");
            assertResponse(prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(randomizeType(multiMatchQueryBuilder.operator(Operator.OR).type(type))), left -> assertResponse(prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(disMaxQuery().add(matchQuery("full_name", "marvel hero captain america")).add(matchQuery("first_name", "marvel hero captain america")).add(matchQuery("last_name", "marvel hero captain america")).add(matchQuery("category", "marvel hero captain america"))), right -> assertEquivalent("marvel hero captain america", left, right)));
        }
        {
            // BOOLEAN multi_match with tie_breaker=1.0 (sum of scores) == bool/should over
            // per-field match queries, with randomized operator and minimum_should_match.
            MatchQueryParser.Type type = MatchQueryParser.Type.BOOLEAN;
            String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
            Operator op = randomBoolean() ? Operator.AND : Operator.OR;
            MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") : multiMatchQuery("captain america", "*_name", randomBoolean() ? "category" : "categ*");
            assertResponse(prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(randomizeType(multiMatchQueryBuilder.operator(op).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch).type(type))), left -> assertResponse(prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(boolQuery().minimumShouldMatch(minShouldMatch).should(randomBoolean() ? termQuery("full_name", "captain america") : matchQuery("full_name", "captain america").operator(op)).should(matchQuery("first_name", "captain america").operator(op)).should(matchQuery("last_name", "captain america").operator(op)).should(matchQuery("category", "captain america").operator(op))), right -> assertEquivalent("captain america", left, right)));
        }
        {
            // PHRASE_PREFIX multi_match == bool/should over per-field match_phrase_prefix.
            String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
            assertResponse(prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(randomizeType(multiMatchQuery("capta", "full_name", "first_name", "last_name", "category").type(MatchQueryParser.Type.PHRASE_PREFIX).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch))), left -> assertResponse(prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(boolQuery().minimumShouldMatch(minShouldMatch).should(matchPhrasePrefixQuery("full_name", "capta")).should(matchPhrasePrefixQuery("first_name", "capta")).should(matchPhrasePrefixQuery("last_name", "capta")).should(matchPhrasePrefixQuery("category", "capta"))), right -> assertEquivalent("capta", left, right)));
        }
        {
            // PHRASE multi_match == bool/should over per-field match_phrase; the left
            // side randomly includes tie_breaker=1.0 (both forms must be equivalent here).
            String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
            SearchRequestBuilder leftSearch;
            if (randomBoolean()) {
                leftSearch = prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type(MatchQueryParser.Type.PHRASE).minimumShouldMatch(minShouldMatch)));
            } else {
                leftSearch = prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category").type(MatchQueryParser.Type.PHRASE).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch)));
            }
            assertResponse(leftSearch, left -> assertResponse(prepareSearch("test").setSize(numDocs).addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("id")).setQuery(boolQuery().minimumShouldMatch(minShouldMatch).should(matchPhraseQuery("full_name", "captain america")).should(matchPhraseQuery("first_name", "captain america")).should(matchPhraseQuery("last_name", "captain america")).should(matchPhraseQuery("category", "captain america"))), right -> assertEquivalent("captain america", left, right)));
        }
    }
}
263172.581177elasticsearch
/**
 * Builds the column header definitions for this cat endpoint's table.
 * Each cell's attribute string encodes: {@code alias} (short column names users may
 * request), {@code default} (whether the column shows without being asked for),
 * {@code text-align}, and {@code desc} (help text). Column names and aliases are a
 * user-facing contract — do not rename them casually.
 */
protected Table getTableWithHeader(final RestRequest request) {
    Table table = new Table();
    table.startHeaders();
    // --- node identity and process info ---
    table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id");
    table.addCell("pid", "default:false;alias:p;desc:process id");
    table.addCell("ip", "alias:i;desc:ip address");
    table.addCell("port", "default:false;alias:po;desc:bound transport port");
    table.addCell("http_address", "default:false;alias:http;desc:bound http address");
    table.addCell("version", "default:false;alias:v;desc:es version");
    table.addCell("type", "default:false;alias:t;desc:es distribution type");
    table.addCell("build", "default:false;alias:b;desc:es build hash");
    table.addCell("jdk", "default:false;alias:j;desc:jdk version");
    // --- disk ---
    table.addCell("disk.total", "default:false;alias:dt,diskTotal;text-align:right;desc:total disk space");
    table.addCell("disk.used", "default:false;alias:du,diskUsed;text-align:right;desc:used disk space");
    table.addCell("disk.avail", "default:false;alias:d,da,disk,diskAvail;text-align:right;desc:available disk space");
    table.addCell("disk.used_percent", "default:false;alias:dup,diskUsedPercent;text-align:right;desc:used disk space percentage");
    // --- heap / RAM / file descriptors ---
    table.addCell("heap.current", "default:false;alias:hc,heapCurrent;text-align:right;desc:used heap");
    table.addCell("heap.percent", "alias:hp,heapPercent;text-align:right;desc:used heap ratio");
    table.addCell("heap.max", "default:false;alias:hm,heapMax;text-align:right;desc:max configured heap");
    table.addCell("ram.current", "default:false;alias:rc,ramCurrent;text-align:right;desc:used machine memory");
    table.addCell("ram.percent", "alias:rp,ramPercent;text-align:right;desc:used machine memory ratio");
    table.addCell("ram.max", "default:false;alias:rm,ramMax;text-align:right;desc:total machine memory");
    table.addCell("file_desc.current", "default:false;alias:fdc,fileDescriptorCurrent;text-align:right;desc:used file descriptors");
    table.addCell("file_desc.percent", "default:false;alias:fdp,fileDescriptorPercent;text-align:right;desc:used file descriptor ratio");
    table.addCell("file_desc.max", "default:false;alias:fdm,fileDescriptorMax;text-align:right;desc:max file descriptors");
    // --- CPU / load / uptime ---
    table.addCell("cpu", "alias:cpu;text-align:right;desc:recent cpu usage");
    // NOTE(review): alias "l" is declared by all three load_* columns below, making
    // the alias ambiguous — confirm this duplication is intentional upstream.
    table.addCell("load_1m", "alias:l;text-align:right;desc:1m load avg");
    table.addCell("load_5m", "alias:l;text-align:right;desc:5m load avg");
    table.addCell("load_15m", "alias:l;text-align:right;desc:15m load avg");
    table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime");
    // --- cluster role ---
    table.addCell("node.role", "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only");
    table.addCell("master", "alias:m;desc:*:current master");
    table.addCell("name", "alias:n;desc:node name");
    // --- per-node index stats: caches ---
    table.addCell("completion.size", "alias:cs,completionSize;default:false;text-align:right;desc:size of completion");
    table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
    table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
    table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache");
    table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
    table.addCell("query_cache.hit_count", "alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit counts");
    table.addCell("query_cache.miss_count", "alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss counts");
    table.addCell("request_cache.memory_size", "alias:rcm,requestCacheMemory;default:false;text-align:right;desc:used request cache");
    table.addCell("request_cache.evictions", "alias:rce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions");
    table.addCell("request_cache.hit_count", "alias:rchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts");
    table.addCell("request_cache.miss_count", "alias:rcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts");
    // --- flush / get ---
    table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
    table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
    table.addCell("get.current", "alias:gc,getCurrent;default:false;text-align:right;desc:number of current get ops");
    table.addCell("get.time", "alias:gti,getTime;default:false;text-align:right;desc:time spent in get");
    table.addCell("get.total", "alias:gto,getTotal;default:false;text-align:right;desc:number of get ops");
    table.addCell("get.exists_time", "alias:geti,getExistsTime;default:false;text-align:right;desc:time spent in successful gets");
    table.addCell("get.exists_total", "alias:geto,getExistsTotal;default:false;text-align:right;desc:number of successful gets");
    table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
    table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
    // --- indexing ---
    table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
    table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
    table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
    table.addCell("indexing.index_current", "alias:iic,indexingIndexCurrent;default:false;text-align:right;desc:number of current indexing ops");
    table.addCell("indexing.index_time", "alias:iiti,indexingIndexTime;default:false;text-align:right;desc:time spent in indexing");
    table.addCell("indexing.index_total", "alias:iito,indexingIndexTotal;default:false;text-align:right;desc:number of indexing ops");
    table.addCell("indexing.index_failed", "alias:iif,indexingIndexFailed;default:false;text-align:right;desc:number of failed indexing ops");
    // --- merges ---
    table.addCell("merges.current", "alias:mc,mergesCurrent;default:false;text-align:right;desc:number of current merges");
    table.addCell("merges.current_docs", "alias:mcd,mergesCurrentDocs;default:false;text-align:right;desc:number of current merging docs");
    table.addCell("merges.current_size", "alias:mcs,mergesCurrentSize;default:false;text-align:right;desc:size of current merges");
    table.addCell("merges.total", "alias:mt,mergesTotal;default:false;text-align:right;desc:number of completed merge ops");
    table.addCell("merges.total_docs", "alias:mtd,mergesTotalDocs;default:false;text-align:right;desc:docs merged");
    table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
    table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
    // --- refresh ---
    table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
    table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
    // NOTE(review): the two external refresh columns below reuse the aliases
    // "rto,refreshTotal" / "rti,refreshTime" already claimed by refresh.total and
    // refresh.time — looks like a copy-paste duplication; confirm before relying on
    // these aliases.
    table.addCell("refresh.external_total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total external refreshes");
    table.addCell("refresh.external_time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in external refreshes");
    table.addCell("refresh.listeners", "alias:rli,refreshListeners;default:false;text-align:right;desc:number of pending refresh listeners");
    // --- scripts ---
    table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations");
    table.addCell("script.cache_evictions", "alias:scrce,scriptCacheEvictions;default:false;text-align:right;desc:script cache evictions");
    table.addCell("script.compilation_limit_triggered", "alias:scrclt,scriptCacheCompilationLimitTriggered;default:false;" + "text-align:right;desc:script cache compilation limit triggered");
    // --- search ---
    table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
    table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");
    table.addCell("search.fetch_total", "alias:sfto,searchFetchTotal;default:false;text-align:right;desc:total fetch ops");
    table.addCell("search.open_contexts", "alias:so,searchOpenContexts;default:false;text-align:right;desc:open search contexts");
    table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops");
    table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase");
    table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops");
    table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts");
    table.addCell("search.scroll_time", "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open");
    table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts");
    // --- segments ---
    table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments");
    table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments");
    table.addCell("segments.index_writer_memory", "alias:siwm,segmentsIndexWriterMemory;default:false;text-align:right;desc:memory used by index writer");
    table.addCell("segments.version_map_memory", "alias:svmm,segmentsVersionMapMemory;default:false;text-align:right;desc:memory used by version map");
    table.addCell("segments.fixed_bitset_memory", "alias:sfbm,fixedBitsetMemory;default:false;text-align:right;desc:memory used by fixed bit sets for nested object field types" + " and type filters for types referred in _parent fields");
    // --- suggest / bulk / shard totals / mappings ---
    table.addCell("suggest.current", "alias:suc,suggestCurrent;default:false;text-align:right;desc:number of current suggest ops");
    table.addCell("suggest.time", "alias:suti,suggestTime;default:false;text-align:right;desc:time spend in suggest");
    table.addCell("suggest.total", "alias:suto,suggestTotal;default:false;text-align:right;desc:number of suggest ops");
    table.addCell("bulk.total_operations", "alias:bto,bulkTotalOperations;default:false;text-align:right;desc:number of bulk shard ops");
    table.addCell("bulk.total_time", "alias:btti,bulkTotalTime;default:false;text-align:right;desc:time spend in shard bulk");
    table.addCell("bulk.total_size_in_bytes", "alias:btsi,bulkTotalSizeInBytes;default:false;text-align:right;desc:total size in bytes of shard bulk");
    table.addCell("bulk.avg_time", "alias:bati,bulkAvgTime;default:false;text-align:right;desc:average time spend in shard bulk");
    table.addCell("bulk.avg_size_in_bytes", "alias:basi,bulkAvgSizeInBytes;default:false;text-align:right;desc:average size in bytes of shard bulk");
    table.addCell("shard_stats.total_count", "alias:sstc,shards,shardStatsTotalCount;default:false;text-align:right;desc:number of shards assigned");
    table.addCell("mappings.total_count", "alias:mtc,mappingsTotalCount;default:false;text-align:right;desc:number of mappings");
    table.addCell("mappings.total_estimated_overhead_in_bytes", "alias:mteo,mappingsTotalEstimatedOverheadInBytes;default:false;text-align:right;desc:estimated" + " overhead in bytes of mappings");
    table.endHeaders();
    return table;
}
262186.4925133elasticsearch
/**
 * Runs one step of the LZ4 block-decoding state machine against the buffered input.
 *
 * In INIT_BLOCK it parses and validates a fixed-size block header; in DECOMPRESS_DATA
 * it reads the block payload, decompresses it if needed, and copies the result into
 * recycled pages. Returns the number of input bytes consumed this call (0 when more
 * input is needed). Any IOException marks the stream CORRUPTED before rethrowing.
 *
 * @param reference buffered, possibly fragmented input bytes
 * @return bytes consumed from {@code reference}
 * @throws IOException on read failure; state becomes CORRUPTED
 */
private int decodeBlock(BytesReference reference) throws IOException {
    int bytesConsumed = 0;
    try {
        switch(currentState) {
            case INIT_BLOCK:
                // Wait until the whole fixed-size header is buffered.
                if (reference.length() < HEADER_LENGTH) {
                    return bytesConsumed;
                }
                try (StreamInput in = reference.streamInput()) {
                    final long magic = in.readLong();
                    if (magic != MAGIC_NUMBER) {
                        throw new IllegalStateException("unexpected block identifier");
                    }
                    // Token byte: low nibble encodes the compression level,
                    // high nibble the block type (compressed vs non-compressed).
                    final int token = in.readByte();
                    final int compressionLevel = (token & 0x0F) + COMPRESSION_LEVEL_BASE;
                    int blockType = token & 0xF0;
                    // Lengths are stored little-endian; reverseBytes converts from
                    // the stream's big-endian int read.
                    int compressedLength = Integer.reverseBytes(in.readInt());
                    if (compressedLength < 0 || compressedLength > MAX_BLOCK_SIZE) {
                        throw new IllegalStateException(String.format(Locale.ROOT, "invalid compressedLength: %d (expected: 0-%d)", compressedLength, MAX_BLOCK_SIZE));
                    }
                    int decompressedLength = Integer.reverseBytes(in.readInt());
                    // The decompressed size may not exceed 2^compressionLevel.
                    final int maxDecompressedLength = 1 << compressionLevel;
                    if (decompressedLength < 0 || decompressedLength > maxDecompressedLength) {
                        throw new IllegalStateException(String.format(Locale.ROOT, "invalid decompressedLength: %d (expected: 0-%d)", decompressedLength, maxDecompressedLength));
                    }
                    // Sanity-check the length pair: zero lengths must come together,
                    // and a non-compressed block must have equal lengths.
                    if (decompressedLength == 0 && compressedLength != 0 || decompressedLength != 0 && compressedLength == 0 || blockType == BLOCK_TYPE_NON_COMPRESSED && decompressedLength != compressedLength) {
                        throw new IllegalStateException(String.format(Locale.ROOT, "stream corrupted: compressedLength(%d) and decompressedLength(%d) mismatch", compressedLength, decompressedLength));
                    }
                    // Skips the final 4-byte header field (presumably a checksum —
                    // not validated here; TODO confirm against the encoder).
                    in.readInt();
                    bytesConsumed += HEADER_LENGTH;
                    // A zero-length block is the end-of-stream marker.
                    if (decompressedLength == 0) {
                        currentState = State.FINISHED;
                        decompressor = null;
                        break;
                    }
                    // Stash header values for the DECOMPRESS_DATA step.
                    this.blockType = blockType;
                    this.compressedLength = compressedLength;
                    this.decompressedLength = decompressedLength;
                }
                currentState = State.DECOMPRESS_DATA;
                break;
            case DECOMPRESS_DATA:
                // Wait until the full payload announced by the header is buffered.
                if (reference.length() < compressedLength) {
                    break;
                }
                byte[] decompressed = getThreadLocalBuffer(DECOMPRESSED, decompressedLength);
                try {
                    switch(blockType) {
                        case BLOCK_TYPE_NON_COMPRESSED:
                            // Stored block: copy the bytes straight through.
                            try (StreamInput streamInput = reference.streamInput()) {
                                streamInput.readBytes(decompressed, 0, decompressedLength);
                            }
                            break;
                        case BLOCK_TYPE_COMPRESSED:
                            BytesRef ref = reference.iterator().next();
                            final byte[] compressed;
                            final int compressedOffset;
                            if (ref.length >= compressedLength) {
                                // Payload is contiguous in the first fragment:
                                // decompress in place, no copy needed.
                                compressed = ref.bytes;
                                compressedOffset = ref.offset;
                            } else {
                                // Payload spans fragments: gather it into a
                                // thread-local scratch buffer first.
                                compressed = getThreadLocalBuffer(COMPRESSED, compressedLength);
                                compressedOffset = 0;
                                try (StreamInput streamInput = reference.streamInput()) {
                                    streamInput.readBytes(compressed, 0, compressedLength);
                                }
                            }
                            decompressor.decompress(compressed, compressedOffset, decompressed, 0, decompressedLength);
                            break;
                        default:
                            throw new IllegalStateException(String.format(Locale.ROOT, "unexpected blockType: %d (expected: %d or %d)", blockType, BLOCK_TYPE_NON_COMPRESSED, BLOCK_TYPE_COMPRESSED));
                    }
                    bytesConsumed += compressedLength;
                    // Copy the decompressed bytes into recycled pages, appending to
                    // the current page and allocating new ones as needed.
                    int bytesToCopy = decompressedLength;
                    int uncompressedOffset = 0;
                    while (bytesToCopy > 0) {
                        maybeAddNewPage();
                        final Recycler.V<BytesRef> page = pages.getLast();
                        int toCopy = Math.min(bytesToCopy, pageLength - pageOffset);
                        System.arraycopy(decompressed, uncompressedOffset, page.v().bytes, page.v().offset + pageOffset, toCopy);
                        pageOffset += toCopy;
                        bytesToCopy -= toCopy;
                        uncompressedOffset += toCopy;
                    }
                    // Block fully decoded; go back to expecting the next header.
                    currentState = State.INIT_BLOCK;
                } catch (LZ4Exception e) {
                    throw new IllegalStateException(e);
                }
                break;
            case FINISHED:
                // End-of-stream already seen; nothing more to consume.
                break;
            case CORRUPTED:
                throw new IllegalStateException("LZ4 stream corrupted.");
            default:
                throw new IllegalStateException();
        }
    } catch (IOException e) {
        // Poison the decoder so later calls fail fast instead of misreading.
        currentState = State.CORRUPTED;
        throw e;
    }
    return bytesConsumed;
}
262848.543166elasticsearch
/**
 * Verifies that when a search request carries a required minimum node version, shards hosted on
 * older-version nodes are rejected with a {@code VersionMismatchException} at phase-execution
 * time, while shards on up-to-date nodes complete the query phase normally.
 *
 * NOTE(review): the sequence of random() / randomInt() calls is seed-sensitive; do not reorder.
 */
public void testMinimumVersionShardDuringPhaseExecution() throws Exception {
    // The search requires Version.CURRENT; node3 runs a strictly older (still wire-compatible) version.
    Version newVersion = Version.CURRENT;
    Version oldVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), VersionUtils.getPreviousVersion(newVersion));
    Version minVersion = newVersion;
    final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime);
    AtomicInteger successfulOps = new AtomicInteger();
    // node1/node2 are new-version, node3 is old-version; lookup resolves node ids to mock connections.
    Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>();
    DiscoveryNode newVersionNode1 = DiscoveryNodeUtils.builder("node1").version(newVersion).build();
    DiscoveryNode newVersionNode2 = DiscoveryNodeUtils.builder("node2").version(newVersion).build();
    DiscoveryNode oldVersionNode = DiscoveryNodeUtils.builder("node3").version(oldVersion).build();
    lookup.put("node1", new SearchAsyncActionTests.MockConnection(newVersionNode1));
    lookup.put("node2", new SearchAsyncActionTests.MockConnection(newVersionNode2));
    lookup.put("node3", new SearchAsyncActionTests.MockConnection(oldVersionNode));
    OriginalIndices idx = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS);
    // Two started primary shards, one on each new-version node; these should both succeed.
    ArrayList<SearchShardIterator> list = new ArrayList<>();
    ShardRouting routingNewVersionShard1 = ShardRouting.newUnassigned(new ShardId(new Index("idx", "_na_"), 0), true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), ShardRouting.Role.DEFAULT);
    routingNewVersionShard1 = routingNewVersionShard1.initialize(newVersionNode1.getId(), "p0", 0);
    routingNewVersionShard1.started();
    list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 0), singletonList(routingNewVersionShard1), idx));
    ShardRouting routingNewVersionShard2 = ShardRouting.newUnassigned(new ShardId(new Index("idx", "_na_"), 1), true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), ShardRouting.Role.DEFAULT);
    routingNewVersionShard2 = routingNewVersionShard2.initialize(newVersionNode2.getId(), "p1", 0);
    routingNewVersionShard2.started();
    list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 1), singletonList(routingNewVersionShard2), idx));
    GroupShardsIterator<SearchShardIterator> shardsIter = new GroupShardsIterator<>(list);
    final SearchRequest searchRequest = new SearchRequest(minVersion);
    searchRequest.allowPartialSearchResults(false);
    searchRequest.source(new SearchSourceBuilder().size(1).sort(SortBuilders.fieldSort("timestamp")));
    // Stub transport: answers every query with a single sorted hit on a background thread and
    // counts the op so we can later assert exactly how many shard queries were executed.
    SearchTransportService searchTransportService = new SearchTransportService(null, null, null) {

        @Override
        public void sendExecuteQuery(Transport.Connection connection, ShardSearchRequest request, SearchTask task, SearchActionListener<? super SearchPhaseResult> listener) {
            int shardId = request.shardId().id();
            QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("N/A", 123), new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null), null);
            SortField sortField = new SortField("timestamp", SortField.Type.LONG);
            if (shardId == 0) {
                queryResult.topDocs(new TopDocsAndMaxScore(new TopFieldDocs(new TotalHits(1, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { shardId }) }, new SortField[] { sortField }), Float.NaN), new DocValueFormat[] { DocValueFormat.RAW });
            } else if (shardId == 1) {
                queryResult.topDocs(new TopDocsAndMaxScore(new TopFieldDocs(new TotalHits(1, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { shardId }) }, new SortField[] { sortField }), Float.NaN), new DocValueFormat[] { DocValueFormat.RAW });
            }
            queryResult.from(0);
            queryResult.size(1);
            successfulOps.incrementAndGet();
            // Respond asynchronously to mimic real transport behavior.
            new Thread(() -> listener.onResponse(queryResult)).start();
        }
    };
    SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder());
    SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap());
    QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer(searchRequest, EsExecutors.DIRECT_EXECUTOR_SERVICE, new NoopCircuitBreaker(CircuitBreaker.REQUEST), controller, task::isCancelled, task.getProgressListener(), shardsIter.size(), exc -> {
    });
    CountDownLatch latch = new CountDownLatch(1);
    // The "next phase" is a no-op that just releases the latch once the query phase is done.
    SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction(logger, null, searchTransportService, (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", AliasFilter.EMPTY), Collections.emptyMap(), EsExecutors.DIRECT_EXECUTOR_SERVICE, resultConsumer, searchRequest, null, shardsIter, timeProvider, new ClusterState.Builder(new ClusterName("test")).build(), task, SearchResponse.Clusters.EMPTY) {

        @Override
        protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
            return new SearchPhase("test") {

                @Override
                public void run() {
                    latch.countDown();
                }
            };
        }
    };
    // A third shard on the old-version node, deliberately NOT part of shardsIter: it is used
    // below to invoke executePhaseOnShard directly and trigger the version check.
    ShardRouting routingOldVersionShard = ShardRouting.newUnassigned(new ShardId(new Index("idx", "_na_"), 2), true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), ShardRouting.Role.DEFAULT);
    SearchShardIterator shardIt = new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 2), singletonList(routingOldVersionShard), idx);
    routingOldVersionShard = routingOldVersionShard.initialize(oldVersionNode.getId(), "p2", 0);
    routingOldVersionShard.started();
    action.start();
    latch.await();
    // Both new-version shards were queried and their results reduce to two total hits.
    assertThat(successfulOps.get(), equalTo(2));
    SearchPhaseController.ReducedQueryPhase phase = action.results.reduce();
    assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1));
    assertThat(phase.totalHits().value, equalTo(2L));
    assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO));
    SearchShardTarget searchShardTarget = new SearchShardTarget("node3", shardIt.shardId(), null);
    SearchActionListener<SearchPhaseResult> listener = new SearchActionListener<SearchPhaseResult>(searchShardTarget, 0) {

        @Override
        public void onFailure(Exception e) {
        }

        @Override
        protected void innerOnResponse(SearchPhaseResult response) {
        }
    };
    // Executing the phase against the old-version node must fail the minimum-version check.
    Exception e = expectThrows(VersionMismatchException.class, () -> action.executePhaseOnShard(shardIt, searchShardTarget, listener));
    assertThat(e.getMessage(), equalTo("One of the shards is incompatible with the required minimum version [" + minVersion + "]"));
}
264542.956138elasticsearch
/**
 * Exercises {@code DiskThresholdDecider#canRemain} and checks that it uses the
 * least-available-space path for the disk a shard is actually allocated on.
 *
 * Scenario (node_1 is exactly one byte below the high watermark on its "least" path):
 * <ul>
 *   <li>test_0 on node_0/least with exactly the watermark free — may remain (YES)</li>
 *   <li>test_1 on node_1/least, below the watermark — must relocate (NO)</li>
 *   <li>test_2 on node_1/most — not on the most utilized disk, may remain (YES)</li>
 *   <li>test_3 on node_1 with no path info in the ClusterInfo — may remain (YES)</li>
 * </ul>
 *
 * @param testMaxHeadroom true to size the disks so the max-headroom setting (150gb) applies,
 *                        false to use the percentage watermark (90%)
 */
private void doTestCanRemainUsesLeastAvailableSpace(boolean testMaxHeadroom) {
    ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);
    // Maps each shard to the data path it is allocated on; test_3 is deliberately omitted
    // so the decider has no disk information for it.
    Map<ClusterInfo.NodeAndShard, String> shardRoutingMap = new HashMap<>();
    DiscoveryNode node_0 = DiscoveryNodeUtils.builder("node_0").roles(new HashSet<>(DiscoveryNodeRole.roles())).build();
    DiscoveryNode node_1 = DiscoveryNodeUtils.builder("node_1").roles(new HashSet<>(DiscoveryNodeRole.roles())).build();
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1)).build();
    final IndexMetadata indexMetadata = metadata.index("test");
    ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(indexMetadata.getIndex(), 0), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_0 = ShardRoutingHelper.initialize(test_0, node_0.getId());
    test_0 = ShardRoutingHelper.moveToStarted(test_0);
    shardRoutingMap.put(ClusterInfo.NodeAndShard.from(test_0), "/node0/least");
    ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(indexMetadata.getIndex(), 1), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_1 = ShardRoutingHelper.initialize(test_1, node_1.getId());
    test_1 = ShardRoutingHelper.moveToStarted(test_1);
    shardRoutingMap.put(ClusterInfo.NodeAndShard.from(test_1), "/node1/least");
    ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(indexMetadata.getIndex(), 2), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_2 = ShardRoutingHelper.initialize(test_2, node_1.getId());
    test_2 = ShardRoutingHelper.moveToStarted(test_2);
    shardRoutingMap.put(ClusterInfo.NodeAndShard.from(test_2), "/node1/most");
    // test_3 has no entry in shardRoutingMap: the decider must cope with missing path info.
    ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(indexMetadata.getIndex(), 3), true, EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_3 = ShardRoutingHelper.initialize(test_3, node_1.getId());
    test_3 = ShardRoutingHelper.moveToStarted(test_3);
    RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).build();
    logger.info("--> adding two nodes");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node_0).add(node_1)).build();
    // node_0's least-available path sits exactly at the high watermark; node_1's is one byte below it.
    final long totalBytes = testMaxHeadroom ? ByteSizeValue.ofGb(10000).getBytes() : 100;
    final long exactFreeSpaceForHighWatermark = testMaxHeadroom ? ByteSizeValue.ofGb(150).getBytes() : 10;
    final long exactFreeSpaceForBelowHighWatermark = exactFreeSpaceForHighWatermark - 1;
    final double exactUsedSpaceForBelowHighWatermark = 100.0 * (totalBytes - exactFreeSpaceForBelowHighWatermark) / totalBytes;
    final long ninetyPercentFreeSpace = (long) (totalBytes * 0.9);
    Map<String, DiskUsage> leastAvailableUsages = new HashMap<>();
    leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "/node0/least", totalBytes, exactFreeSpaceForHighWatermark));
    leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "/node1/least", totalBytes, exactFreeSpaceForBelowHighWatermark));
    Map<String, DiskUsage> mostAvailableUsage = new HashMap<>();
    mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "/node0/most", totalBytes, ninetyPercentFreeSpace));
    mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "/node1/most", totalBytes, ninetyPercentFreeSpace));
    Map<String, Long> shardSizes = new HashMap<>();
    shardSizes.put("[test][0][p]", exactFreeSpaceForHighWatermark);
    shardSizes.put("[test][1][p]", exactFreeSpaceForHighWatermark);
    shardSizes.put("[test][2][p]", exactFreeSpaceForHighWatermark);
    final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages, mostAvailableUsage, shardSizes, Map.of(), shardRoutingMap, Map.of());
    RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)), clusterState, clusterInfo, null, System.nanoTime());
    allocation.debugDecision(true);
    // At exactly the watermark the shard may still remain.
    Decision decision = decider.canRemain(indexMetadata, test_0, RoutingNodesHelper.routingNode("node_0", node_0), allocation);
    assertEquals(Decision.Type.YES, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(), containsString("there is enough disk on this node for the shard to remain, free: [" + ByteSizeValue.ofBytes(exactFreeSpaceForHighWatermark) + "]"));
    // One byte below the watermark the shard must move off the node.
    decision = decider.canRemain(indexMetadata, test_1, RoutingNodesHelper.routingNode("node_1", node_1), allocation);
    assertEquals(Decision.Type.NO, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(), containsString("the shard cannot remain on this node because it is above the high watermark cluster setting " + "[cluster.routing.allocation.disk.watermark.high" + (testMaxHeadroom ? ".max_headroom=150gb" : "=90%") + "] and there is less than the required [" + ByteSizeValue.ofBytes(exactFreeSpaceForHighWatermark) + "] free space on " + "node, actual free: [" + ByteSizeValue.ofBytes(exactFreeSpaceForBelowHighWatermark) + "], actual used: [" + Strings.format1Decimals(exactUsedSpaceForBelowHighWatermark, "%") + "]"));
    // Asking about a shard on a node it is not allocated to is a programming error.
    try {
        decider.canRemain(indexMetadata, test_0, RoutingNodesHelper.routingNode("node_1", node_1), allocation);
        fail("not allocated on this node");
    } catch (IllegalArgumentException ex) {
        // expected: test_0 is allocated on node_0, not node_1
    }
    try {
        decider.canRemain(indexMetadata, test_1, RoutingNodesHelper.routingNode("node_0", node_0), allocation);
        fail("not allocated on this node");
    } catch (IllegalArgumentException ex) {
        // expected: test_1 is allocated on node_1, not node_0
    }
    decision = decider.canRemain(indexMetadata, test_2, RoutingNodesHelper.routingNode("node_1", node_1), allocation);
    assertEquals("can stay since allocated on a different path with enough space", Decision.Type.YES, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(), containsString("this shard is not allocated on the most utilized disk and can remain"));
    // Fixed: this branch previously re-checked test_2; it must use test_3, the shard with no
    // path information in the ClusterInfo, to cover the "no disk info" case.
    decision = decider.canRemain(indexMetadata, test_3, RoutingNodesHelper.routingNode("node_1", node_1), allocation);
    assertEquals("can stay since we don't have information about this shard", Decision.Type.YES, decision.type());
    assertThat(((Decision.Single) decision).getExplanation(), containsString("this shard is not allocated on the most utilized disk and can remain"));
}
264449.5310139elasticsearch
/**
 * Verifies that the temporary resize settings on a shrink/split target index
 * (resize source name/UUID and, for shrink, the initial-recovery key) are removed once all
 * target primaries have started — except the resize source name, which is retained while an
 * ILM lifecycle policy is configured.
 *
 * NOTE(review): the sequence of randomFrom/randomBoolean/randomInt calls is seed-sensitive;
 * do not reorder.
 */
public void testResizeIndexSettingsRemovedAfterStart() {
    final DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(newNode("node1", "id1", MASTER_DATA_ROLES)).add(newNode("node2", "id2", MASTER_DATA_ROLES)).build();
    final DiscoveryNode resizeNode = randomFrom(discoveryNodes.getDataNodes().values());
    final String sourceIndex = "source";
    final String targetIndex = "target";
    // Source index: 2 shards pinned to the resize node via routing require, writes blocked (a
    // precondition for resizing), 16 routing shards so any target shard count divides evenly.
    final Metadata sourceMetadata = Metadata.builder().put(IndexMetadata.builder(sourceIndex).settings(settings(IndexVersion.current()).put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_name", resizeNode.getName()).put("index.blocks.write", true)).numberOfShards(2).numberOfReplicas(0).setRoutingNumShards(16)).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(sourceMetadata.index(sourceIndex))).metadata(sourceMetadata).nodes(discoveryNodes).build();
    // Freshly created source shards start out unassigned.
    {
        IndexRoutingTable sourceRoutingTable = clusterState.routingTable().index(sourceIndex);
        assertThat(sourceRoutingTable.size(), equalTo(2));
        assertThat(sourceRoutingTable.shard(0).primaryShard().state(), equalTo(UNASSIGNED));
        assertThat(sourceRoutingTable.shard(1).primaryShard().state(), equalTo(UNASSIGNED));
    }
    // Generous recovery throttles so all shards can initialize in a single reroute.
    final AllocationService allocationService = createAllocationService(Settings.builder().put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 16).put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 16).build());
    clusterState = allocationService.reroute(clusterState, "reroute", ActionListener.noop());
    // After reroute the source primaries initialize on the required resize node.
    {
        IndexRoutingTable sourceRoutingTable = clusterState.routingTable().index(sourceIndex);
        assertThat(sourceRoutingTable.size(), equalTo(2));
        assertThat(sourceRoutingTable.shard(0).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(sourceRoutingTable.shard(1).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(sourceRoutingTable.shard(0).primaryShard().currentNodeId(), equalTo(resizeNode.getId()));
        assertThat(sourceRoutingTable.shard(1).primaryShard().currentNodeId(), equalTo(resizeNode.getId()));
    }
    clusterState = startInitializingShardsAndReroute(allocationService, clusterState);
    // Source primaries are now started, still on the resize node.
    {
        IndexRoutingTable sourceRoutingTable = clusterState.routingTable().index(sourceIndex);
        assertThat(sourceRoutingTable.size(), equalTo(2));
        assertThat(sourceRoutingTable.shard(0).primaryShard().state(), equalTo(STARTED));
        assertThat(sourceRoutingTable.shard(1).primaryShard().state(), equalTo(STARTED));
        assertThat(sourceRoutingTable.shard(0).primaryShard().currentNodeId(), equalTo(resizeNode.getId()));
        assertThat(sourceRoutingTable.shard(1).primaryShard().currentNodeId(), equalTo(resizeNode.getId()));
    }
    // Build the resize target with the temporary settings under test.
    final int targetNumShards = randomFrom(1, 2, 4, 8, 16);
    final int targetNumReplicas = randomInt(2);
    final Settings.Builder targetSettings = indexSettings(IndexVersion.current(), targetNumShards, targetNumReplicas);
    targetSettings.put(IndexMetadata.INDEX_RESIZE_SOURCE_NAME.getKey(), sourceIndex);
    targetSettings.put(IndexMetadata.INDEX_RESIZE_SOURCE_UUID.getKey(), sourceMetadata.index(sourceIndex).getIndexUUID());
    final boolean isShrink = randomBoolean();
    if (isShrink) {
        // Shrink additionally pins initial recovery to the node holding all source shards.
        targetSettings.put(IndexMetadata.INDEX_SHRINK_INITIAL_RECOVERY_KEY, resizeNode.getId());
    }
    final boolean hasLifecyclePolicy = randomBoolean();
    if (hasLifecyclePolicy) {
        // ILM keeps INDEX_RESIZE_SOURCE_NAME even after all primaries start.
        targetSettings.put(IndexMetadata.LIFECYCLE_NAME, "policy");
    }
    clusterState = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(IndexMetadata.builder(targetIndex).settings(targetSettings).setRoutingNumShards(16))).build();
    clusterState = ClusterState.builder(clusterState).routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState.routingTable()).addAsNew(clusterState.metadata().index(targetIndex))).build();
    // Target shards start out unassigned.
    {
        IndexRoutingTable targetRoutingTable = clusterState.routingTable().index(targetIndex);
        assertThat(targetRoutingTable.size(), equalTo(targetNumShards));
        for (int i = 0; i < targetNumShards; i++) {
            ShardRouting shardRouting = targetRoutingTable.shard(i).primaryShard();
            assertThat(shardRouting.toString(), shardRouting.state(), equalTo(UNASSIGNED));
        }
    }
    clusterState = allocationService.reroute(clusterState, "reroute", ActionListener.noop());
    // While target primaries are still initializing, all resize settings remain in place.
    {
        IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndex);
        assertThat(IndexMetadata.INDEX_RESIZE_SOURCE_NAME.exists(targetIndexMetadata.getSettings()), is(true));
        assertThat(IndexMetadata.INDEX_RESIZE_SOURCE_UUID.exists(targetIndexMetadata.getSettings()), is(true));
        assertThat(targetIndexMetadata.getSettings().hasValue(IndexMetadata.INDEX_SHRINK_INITIAL_RECOVERY_KEY), is(isShrink));
        IndexRoutingTable targetRoutingTable = clusterState.routingTable().index(targetIndex);
        assertThat(targetRoutingTable.size(), equalTo(targetNumShards));
        for (int i = 0; i < targetNumShards; i++) {
            ShardRouting shardRouting = targetRoutingTable.shard(i).primaryShard();
            assertThat(shardRouting.toString(), shardRouting.state(), equalTo(INITIALIZING));
        }
    }
    // Start random subsets of initializing shards; settings disappear only once ALL primaries
    // are active (source name also needs no lifecycle policy to be removed).
    while (true) {
        IndexRoutingTable targetIndexRoutingTable = clusterState.routingTable().index(targetIndex);
        List<ShardRouting> initializing = targetIndexRoutingTable.shardsWithState(INITIALIZING);
        if (initializing.isEmpty()) {
            break;
        }
        IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndex);
        assertThat(IndexMetadata.INDEX_RESIZE_SOURCE_NAME.exists(targetIndexMetadata.getSettings()), is(hasLifecyclePolicy || (targetIndexRoutingTable.allPrimaryShardsActive() == false)));
        assertThat(IndexMetadata.INDEX_RESIZE_SOURCE_UUID.exists(targetIndexMetadata.getSettings()), is(targetIndexRoutingTable.allPrimaryShardsActive() == false));
        assertThat(targetIndexMetadata.getSettings().hasValue(IndexMetadata.INDEX_SHRINK_INITIAL_RECOVERY_KEY), is(targetIndexRoutingTable.allPrimaryShardsActive() ? false : isShrink));
        clusterState = startShardsAndReroute(allocationService, clusterState, randomNonEmptySubsetOf(initializing));
    }
    // Final state: all primaries started, resize settings gone (source name stays under ILM).
    {
        IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndex);
        assertThat(IndexMetadata.INDEX_RESIZE_SOURCE_NAME.exists(targetIndexMetadata.getSettings()), is(hasLifecyclePolicy));
        assertThat(IndexMetadata.INDEX_RESIZE_SOURCE_UUID.exists(targetIndexMetadata.getSettings()), is(false));
        assertThat(targetIndexMetadata.getSettings().hasValue(IndexMetadata.INDEX_SHRINK_INITIAL_RECOVERY_KEY), is(false));
        IndexRoutingTable targetRoutingTable = clusterState.routingTable().index(targetIndex);
        assertThat(targetRoutingTable.size(), equalTo(targetNumShards));
        for (int i = 0; i < targetNumShards; i++) {
            ShardRouting shardRouting = targetRoutingTable.shard(i).primaryShard();
            assertThat(shardRouting.toString(), shardRouting.state(), equalTo(STARTED));
        }
    }
}
263497.1416137elasticsearch
/**
 * Stress-tests {@code LiveVersionMap} with multiple writer threads performing index/delete
 * updates under per-uid locks while the main thread concurrently cycles refreshes
 * (beforeRefresh/afterRefresh) and verifies version monotonicity, tombstone consistency and
 * tombstone pruning.
 */
public void testConcurrently() throws IOException, InterruptedException {
    // A small shared key space so threads contend on the same uids.
    HashSet<BytesRef> keySet = new HashSet<>();
    int numKeys = randomIntBetween(50, 200);
    for (int i = 0; i < numKeys; i++) {
        keySet.add(uid(TestUtil.randomSimpleString(random(), 10, 20)));
    }
    List<BytesRef> keyList = new ArrayList<>(keySet);
    // Shadow state mirroring what the threads wrote, used for the final consistency checks.
    ConcurrentHashMap<BytesRef, VersionValue> values = new ConcurrentHashMap<>();
    ConcurrentHashMap<BytesRef, DeleteVersionValue> deletes = new ConcurrentHashMap<>();
    LiveVersionMap map = new LiveVersionMap();
    int numThreads = randomIntBetween(2, 5);
    Thread[] threads = new Thread[numThreads];
    CountDownLatch startGun = new CountDownLatch(numThreads);
    CountDownLatch done = new CountDownLatch(numThreads);
    int randomValuesPerThread = randomIntBetween(5000, 20000);
    // Logical clock for tombstone timestamps and high-water marks of what pruning has seen.
    final AtomicLong clock = new AtomicLong(0);
    final AtomicLong lastPrunedTimestamp = new AtomicLong(-1);
    final AtomicLong maxSeqNo = new AtomicLong();
    final AtomicLong lastPrunedSeqNo = new AtomicLong();
    for (int j = 0; j < threads.length; j++) {
        threads[j] = new Thread(() -> {
            // All threads wait on the start gun so they begin mutating at the same time.
            startGun.countDown();
            try {
                startGun.await();
            } catch (InterruptedException e) {
                done.countDown();
                throw new AssertionError(e);
            }
            try {
                for (int i = 0; i < randomValuesPerThread; ++i) {
                    BytesRef bytesRef = randomFrom(random(), keyList);
                    try (Releasable r = map.acquireLock(bytesRef)) {
                        VersionValue versionValue = values.computeIfAbsent(bytesRef, v -> new IndexVersionValue(randomTranslogLocation(), randomLong(), maxSeqNo.incrementAndGet(), randomLong()));
                        boolean isDelete = versionValue instanceof DeleteVersionValue;
                        if (isDelete) {
                            // Resurrect: clear the existing tombstone before writing a new value.
                            map.removeTombstoneUnderLock(bytesRef);
                            deletes.remove(bytesRef);
                        }
                        if (isDelete == false && rarely()) {
                            // Occasionally turn a live entry into a delete (tombstone).
                            versionValue = new DeleteVersionValue(versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term, clock.getAndIncrement());
                            deletes.put(bytesRef, (DeleteVersionValue) versionValue);
                            map.putDeleteUnderLock(bytesRef, (DeleteVersionValue) versionValue);
                        } else {
                            versionValue = new IndexVersionValue(randomTranslogLocation(), versionValue.version + 1, maxSeqNo.incrementAndGet(), versionValue.term);
                            map.putIndexUnderLock(bytesRef, (IndexVersionValue) versionValue);
                        }
                        values.put(bytesRef, versionValue);
                    }
                    if (rarely()) {
                        // Prune tombstones up to random watermarks and record the maxima so the
                        // final assertions can tell which deletes were legitimately pruned.
                        final long pruneSeqNo = randomLongBetween(0, maxSeqNo.get());
                        final long clockTick = randomLongBetween(0, clock.get());
                        map.pruneTombstones(clockTick, pruneSeqNo);
                        lastPrunedTimestamp.updateAndGet(prev -> Math.max(clockTick, prev));
                        lastPrunedSeqNo.updateAndGet(prev -> Math.max(pruneSeqNo, prev));
                    }
                }
            } finally {
                done.countDown();
            }
        });
        threads[j].start();
    }
    // While writers run, repeatedly cycle refreshes and check version monotonicity across them.
    do {
        final Map<BytesRef, VersionValue> valueMap = new HashMap<>(map.getAllCurrent());
        map.beforeRefresh();
        valueMap.forEach((k, v) -> {
            try (Releasable r = map.acquireLock(k)) {
                // Between beforeRefresh and afterRefresh, every snapshotted key must still
                // resolve, at an equal or newer version.
                VersionValue actualValue = map.getUnderLock(k);
                assertNotNull(actualValue);
                assertTrue(v.version <= actualValue.version);
            }
        });
        map.afterRefresh(randomBoolean());
        valueMap.forEach((k, v) -> {
            try (Releasable r = map.acquireLock(k)) {
                // After the refresh, a key may be gone (moved to the index); if still present it
                // must be a strictly newer index value or an equal-or-newer tombstone.
                VersionValue actualValue = map.getUnderLock(k);
                if (actualValue != null) {
                    if (actualValue instanceof DeleteVersionValue) {
                        assertTrue(v.version <= actualValue.version);
                    } else {
                        assertTrue(v.version < actualValue.version);
                    }
                }
            }
        });
        if (randomBoolean()) {
            Thread.yield();
        }
    } while (done.getCount() != 0);
    for (int j = 0; j < threads.length; j++) {
        threads[j].join();
    }
    // Quiescent checks: everything left in the map matches the shadow state exactly.
    map.getAllCurrent().forEach((k, v) -> {
        VersionValue versionValue = values.get(k);
        assertNotNull(versionValue);
        assertEquals(v, versionValue);
    });
    // Tombstones must match the shadow state before, during and after a refresh cycle.
    Runnable assertTombstones = () -> map.getAllTombstones().entrySet().forEach(e -> {
        VersionValue versionValue = values.get(e.getKey());
        assertNotNull(versionValue);
        assertEquals(e.getValue(), versionValue);
        assertTrue(versionValue instanceof DeleteVersionValue);
    });
    assertTombstones.run();
    map.beforeRefresh();
    assertTombstones.run();
    map.afterRefresh(false);
    assertTombstones.run();
    // Every recorded delete is either still visible, or was prunable under the recorded
    // pruning high-water marks (both timestamp AND seqNo must be covered).
    deletes.entrySet().forEach(e -> {
        try (Releasable r = map.acquireLock(e.getKey())) {
            VersionValue value = map.getUnderLock(e.getKey());
            final DeleteVersionValue delete = e.getValue();
            if (value == null) {
                assertTrue(delete.time + " > " + lastPrunedTimestamp.get() + "," + delete.seqNo + " > " + lastPrunedSeqNo.get(), delete.time <= lastPrunedTimestamp.get() && delete.seqNo <= lastPrunedSeqNo.get());
            } else {
                assertEquals(value, delete);
            }
        }
    });
    // A final prune past all watermarks must leave no tombstones behind.
    map.pruneTombstones(clock.incrementAndGet(), maxSeqNo.get());
    assertThat(map.getAllTombstones().entrySet(), empty());
}
263747.511171elasticsearch
/**
 * Verifies {@code IpFieldType#rangeQuery} across the field configurations:
 * indexed+doc-values (IndexOrDocValuesQuery), indexed-only (point range query),
 * doc-values-only (doc-values query), and unsearchable (throws).
 * Covers open-ended bounds, exclusive bounds (adjusted to the adjacent IP), empty ranges
 * (MatchNoDocsQuery), and IPv4 values widened to their IPv6-mapped form.
 *
 * NOTE(review): the randomBoolean() call sequence is seed-sensitive; do not reorder.
 */
public void testRangeQuery() {
    // Default field type: indexed with doc values -> IndexOrDocValuesQuery.
    MappedFieldType ft = new IpFieldMapper.IpFieldType("field");
    // Fully open range covers the whole IPv6 space.
    Query query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE);
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, MOCK_CONTEXT));
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0"));
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, MOCK_CONTEXT));
    // Exclusive upper bound becomes the previous address (192.168.1.255).
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255"));
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, MOCK_CONTEXT));
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE);
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, MOCK_CONTEXT));
    // Exclusive lower bound becomes the next address.
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE);
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, MOCK_CONTEXT));
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff"));
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, MOCK_CONTEXT));
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe"));
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, MOCK_CONTEXT));
    // Both bounds exclusive around a single value produce an inverted (empty) point range.
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::"));
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, MOCK_CONTEXT));
    // Degenerate ranges at the extremes of the address space match nothing.
    assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("::", "::", true, false, null, null, null, MOCK_CONTEXT));
    assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, MOCK_CONTEXT));
    // IPv4 bounds are widened: "below 0.0.0.0" is everything under the IPv4-mapped block.
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff"));
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, MOCK_CONTEXT));
    // "above 255.255.255.255" starts right after the IPv4-mapped block.
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE);
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, MOCK_CONTEXT));
    // Mixed IPv4-mapped lower bound and IPv6 upper bound.
    query = InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::"));
    assertEquals(new IndexOrDocValuesQuery(query, convertToDocValuesQuery(query)), ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, MOCK_CONTEXT));
    // Indexed without doc values -> plain point range query.
    ft = new IpFieldMapper.IpFieldType("field", true, false);
    assertEquals(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, MOCK_CONTEXT));
    // Doc values only -> same ranges as above, but expressed as doc-values queries.
    ft = new IpFieldMapper.IpFieldType("field", false);
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddressPoint.MAX_VALUE)), ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.2.0"))), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), true, null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("192.168.1.255"))), ft.rangeQuery(null, "192.168.2.0", randomBoolean(), false, null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddressPoint.MAX_VALUE)), ft.rangeQuery("2001:db8::", null, true, randomBoolean(), null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddressPoint.MAX_VALUE)), ft.rangeQuery("2001:db8::", null, false, randomBoolean(), null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::"), InetAddresses.forString("2001:db8::ffff"))), ft.rangeQuery("2001:db8::", "2001:db8::ffff", true, true, null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::1"), InetAddresses.forString("2001:db8::fffe"))), ft.rangeQuery("2001:db8::", "2001:db8::ffff", false, false, null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("2001:db8::2"), InetAddresses.forString("2001:db8::"))), ft.rangeQuery("2001:db8::1", "2001:db8::1", false, false, null, null, null, MOCK_CONTEXT));
    assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("::", "::", true, false, null, null, null, MOCK_CONTEXT));
    assertEquals(new MatchNoDocsQuery(), ft.rangeQuery("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::"), InetAddresses.forString("::fffe:ffff:ffff"))), ft.rangeQuery("::", "0.0.0.0", true, false, null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("::1:0:0:0"), InetAddressPoint.MAX_VALUE)), ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true, null, null, null, MOCK_CONTEXT));
    assertEquals(convertToDocValuesQuery(InetAddressPoint.newRangeQuery("field", InetAddresses.forString("192.168.1.7"), InetAddresses.forString("2001:db8::"))), ft.rangeQuery("::ffff:c0a8:107", "2001:db8::", true, true, null, null, null, MOCK_CONTEXT));
    // Neither indexed nor doc values -> range queries are rejected.
    MappedFieldType unsearchable = new IpFieldMapper.IpFieldType("field", false, false, false, null, null, Collections.emptyMap(), false);
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.rangeQuery("::1", "2001::", true, true, null, null, null, MOCK_CONTEXT));
    assertEquals("Cannot search on field [field] since it is not indexed nor has doc values.", e.getMessage());
}
263793.6410146elasticsearch
/**
 * Verifies that the vector similarity functions (dotProduct, l1norm, l2norm, cosineSimilarity)
 * produce the same results regardless of whether the stored vector is float- or byte-encoded,
 * across every doc-values field implementation, and that passing a query vector of the wrong
 * element type throws {@link UnsupportedOperationException} pointing at the correct overload.
 * <p>
 * Fix over previous revision: removed three stray empty statements (a bare {@code ;} after each
 * switch block) that were dead tokens flagged by static analysis.
 */
public void testByteVsFloatSimilarity() throws IOException {
    int dims = 5;
    // Stored document vector; all values fit in a signed byte so the same data can back
    // both FLOAT and BYTE element types.
    float[] docVector = new float[] { 1f, 127f, -128f, 5f, -10f };
    // The same query vector expressed four ways: boxed float list, boxed byte list,
    // primitive float array, primitive byte array.
    List<Number> listFloatVector = Arrays.asList(1f, 125f, -12f, 2f, 4f);
    List<Number> listByteVector = Arrays.asList((byte) 1, (byte) 125, (byte) -12, (byte) 2, (byte) 4);
    float[] floatVector = new float[] { 1f, 125f, -12f, 2f, 4f };
    byte[] byteVector = new byte[] { (byte) 1, (byte) 125, (byte) -12, (byte) 2, (byte) 4 };
    // One field per storage/access variant: binary-encoded float (old and current index
    // versions), knn float, binary-encoded byte, and knn byte.
    List<DenseVectorDocValuesField> fields = List.of(new BinaryDenseVectorDocValuesField(BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersions.V_7_4_0), "field0", ElementType.FLOAT, dims, IndexVersions.V_7_4_0), new BinaryDenseVectorDocValuesField(BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), "field1", ElementType.FLOAT, dims, IndexVersion.current()), new KnnDenseVectorDocValuesField(KnnDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }), "field2", dims), new ByteBinaryDenseVectorDocValuesField(BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.BYTE, IndexVersion.current()), "field3", ElementType.BYTE, dims), new ByteKnnDenseVectorDocValuesField(KnnDenseVectorScriptDocValuesTests.wrapBytes(new float[][] { docVector }), "field4", dims));
    for (DenseVectorDocValuesField field : fields) {
        field.setNextDocId(0);
        ScoreScript scoreScript = mock(ScoreScript.class);
        when(scoreScript.field("vector")).thenAnswer(mock -> field);
        // --- dotProduct: list queries are element-type agnostic; array queries must match ---
        int dotProductExpected = 17382;
        DotProduct dotProduct = new DotProduct(scoreScript, listFloatVector, "vector");
        assertEquals(field.getName(), dotProductExpected, dotProduct.dotProduct(), 0.001);
        dotProduct = new DotProduct(scoreScript, listByteVector, "vector");
        assertEquals(field.getName(), dotProductExpected, dotProduct.dotProduct(), 0.001);
        assertEquals(field.getName(), dotProductExpected, field.get().dotProduct(listFloatVector), 0.001);
        assertEquals(field.getName(), dotProductExpected, field.get().dotProduct(listByteVector), 0.001);
        switch(field.getElementType()) {
            case BYTE ->
                {
                    assertEquals(field.getName(), dotProductExpected, field.get().dotProduct(byteVector));
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().dotProduct(floatVector));
                    assertThat(e.getMessage(), containsString("use [int dotProduct(byte[] queryVector)] instead"));
                }
            case FLOAT ->
                {
                    assertEquals(field.getName(), dotProductExpected, field.get().dotProduct(floatVector), 0.001);
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().dotProduct(byteVector));
                    assertThat(e.getMessage(), containsString("use [double dotProduct(float[] queryVector)] instead"));
                }
        }
        // --- l1Norm ---
        int l1NormExpected = 135;
        L1Norm l1Norm = new L1Norm(scoreScript, listFloatVector, "vector");
        assertEquals(field.getName(), l1NormExpected, l1Norm.l1norm(), 0.001);
        l1Norm = new L1Norm(scoreScript, listByteVector, "vector");
        assertEquals(field.getName(), l1NormExpected, l1Norm.l1norm(), 0.001);
        assertEquals(field.getName(), l1NormExpected, field.get().l1Norm(listFloatVector), 0.001);
        assertEquals(field.getName(), l1NormExpected, field.get().l1Norm(listByteVector), 0.001);
        switch(field.getElementType()) {
            case BYTE ->
                {
                    assertEquals(field.getName(), l1NormExpected, field.get().l1Norm(byteVector));
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().l1Norm(floatVector));
                    assertThat(e.getMessage(), containsString("use [int l1Norm(byte[] queryVector)] instead"));
                }
            case FLOAT ->
                {
                    assertEquals(field.getName(), l1NormExpected, field.get().l1Norm(floatVector), 0.001);
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().l1Norm(byteVector));
                    assertThat(e.getMessage(), containsString("use [double l1Norm(float[] queryVector)] instead"));
                }
        }
        // --- l2Norm ---
        float l2NormExpected = 116.897f;
        L2Norm l2Norm = new L2Norm(scoreScript, listFloatVector, "vector");
        assertEquals(field.getName(), l2NormExpected, l2Norm.l2norm(), 0.001);
        l2Norm = new L2Norm(scoreScript, listByteVector, "vector");
        assertEquals(field.getName(), l2NormExpected, l2Norm.l2norm(), 0.001);
        assertEquals(field.getName(), l2NormExpected, field.get().l2Norm(listFloatVector), 0.001);
        assertEquals(field.getName(), l2NormExpected, field.get().l2Norm(listByteVector), 0.001);
        switch(field.getElementType()) {
            case BYTE ->
                {
                    assertEquals(field.getName(), l2NormExpected, field.get().l2Norm(byteVector), 0.001);
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().l2Norm(floatVector));
                    assertThat(e.getMessage(), containsString("use [double l2Norm(byte[] queryVector)] instead"));
                }
            case FLOAT ->
                {
                    assertEquals(field.getName(), l2NormExpected, field.get().l2Norm(floatVector), 0.001);
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().l2Norm(byteVector));
                    assertThat(e.getMessage(), containsString("use [double l2Norm(float[] queryVector)] instead"));
                }
        }
        // --- cosineSimilarity ---
        float cosineSimilarityExpected = 0.765f;
        CosineSimilarity cosineSimilarity = new CosineSimilarity(scoreScript, listFloatVector, "vector");
        assertEquals(field.getName(), cosineSimilarityExpected, cosineSimilarity.cosineSimilarity(), 0.001);
        cosineSimilarity = new CosineSimilarity(scoreScript, listByteVector, "vector");
        assertEquals(field.getName(), cosineSimilarityExpected, cosineSimilarity.cosineSimilarity(), 0.001);
        assertEquals(field.getName(), cosineSimilarityExpected, field.get().cosineSimilarity(listFloatVector), 0.001);
        assertEquals(field.getName(), cosineSimilarityExpected, field.get().cosineSimilarity(listByteVector), 0.001);
        switch(field.getElementType()) {
            case BYTE ->
                {
                    assertEquals(field.getName(), cosineSimilarityExpected, field.get().cosineSimilarity(byteVector), 0.001);
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().cosineSimilarity(floatVector));
                    assertThat(e.getMessage(), containsString("use [double cosineSimilarity(byte[] queryVector, float qvMagnitude)] instead"));
                }
            case FLOAT ->
                {
                    assertEquals(field.getName(), cosineSimilarityExpected, field.get().cosineSimilarity(floatVector), 0.001);
                    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class, () -> field.get().cosineSimilarity(byteVector));
                    assertThat(e.getMessage(), containsString("use [double cosineSimilarity(float[] queryVector, boolean normalizeQueryVector)] instead"));
                }
        }
    }
}
263568.819124elasticsearch
/**
 * Exercises {@link ConnectionProfile.Builder}: optional settings round-trip through build(),
 * every request type must be registered exactly once, and channel handles map each type onto
 * its own contiguous slice of the channel list.
 * <p>
 * Fix over previous revision: the range-assertion loops previously called
 * {@code getChannel(list)} ONCE before the loop and re-asserted the same cached value
 * {@code numIters} times, making the repetition a no-op. The call now happens inside each
 * loop so channel selection is actually sampled {@code numIters} times.
 */
public void testBuildConnectionProfile() {
    ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
    TimeValue connectTimeout = TimeValue.timeValueMillis(randomIntBetween(1, 10));
    TimeValue handshakeTimeout = TimeValue.timeValueMillis(randomIntBetween(1, 10));
    TimeValue pingInterval = TimeValue.timeValueMillis(randomIntBetween(1, 10));
    Compression.Enabled compressionEnabled = randomFrom(Compression.Enabled.TRUE, Compression.Enabled.FALSE, Compression.Enabled.INDEXING_DATA);
    Compression.Scheme compressionScheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4);
    // Each optional setting is applied at random; unset ones must come back null from build().
    final boolean setConnectTimeout = randomBoolean();
    if (setConnectTimeout) {
        builder.setConnectTimeout(connectTimeout);
    }
    final boolean setHandshakeTimeout = randomBoolean();
    if (setHandshakeTimeout) {
        builder.setHandshakeTimeout(handshakeTimeout);
    }
    final boolean setCompress = randomBoolean();
    if (setCompress) {
        builder.setCompressionEnabled(compressionEnabled);
    }
    final boolean setCompressionScheme = randomBoolean();
    if (setCompressionScheme) {
        builder.setCompressionScheme(compressionScheme);
    }
    final boolean setPingInterval = randomBoolean();
    if (setPingInterval) {
        builder.setPingInterval(pingInterval);
    }
    builder.addConnections(1, TransportRequestOptions.Type.BULK);
    builder.addConnections(2, TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY);
    builder.addConnections(3, TransportRequestOptions.Type.PING);
    // REG is still missing at this point, so build() must refuse.
    IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, builder::build);
    assertEquals("not all types are added for this connection profile - missing types: [REG]", illegalStateException.getMessage());
    // Registering a type twice (PING) must be rejected.
    IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> builder.addConnections(4, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.PING));
    assertEquals("type [PING] is already registered", illegalArgumentException.getMessage());
    builder.addConnections(4, TransportRequestOptions.Type.REG);
    final String transportProfile = randomFrom(TransportSettings.DEFAULT_PROFILE, randomAlphaOfLengthBetween(5, 12), null);
    if (transportProfile != null) {
        builder.setTransportProfile(transportProfile);
    }
    ConnectionProfile build = builder.build();
    if (randomBoolean()) {
        // Round-tripping through the copy constructor must preserve everything.
        build = new ConnectionProfile.Builder(build).build();
    }
    assertEquals(10, build.getNumConnections());
    if (setConnectTimeout) {
        assertEquals(connectTimeout, build.getConnectTimeout());
    } else {
        assertNull(build.getConnectTimeout());
    }
    if (setHandshakeTimeout) {
        assertEquals(handshakeTimeout, build.getHandshakeTimeout());
    } else {
        assertNull(build.getHandshakeTimeout());
    }
    if (setCompress) {
        assertEquals(compressionEnabled, build.getCompressionEnabled());
    } else {
        assertNull(build.getCompressionEnabled());
    }
    if (setCompressionScheme) {
        assertEquals(compressionScheme, build.getCompressionScheme());
    } else {
        assertNull(build.getCompressionScheme());
    }
    if (setPingInterval) {
        assertEquals(pingInterval, build.getPingInterval());
    } else {
        assertNull(build.getPingInterval());
    }
    if (transportProfile != null) {
        assertEquals(transportProfile, build.getTransportProfile());
    } else {
        assertEquals(TransportSettings.DEFAULT_PROFILE, build.getTransportProfile());
    }
    // A stand-in channel list: handle.getChannel picks an element out of its own slice.
    List<Integer> list = new ArrayList<>(10);
    for (int i = 0; i < 10; i++) {
        list.add(i);
    }
    final int numIters = randomIntBetween(5, 10);
    assertEquals(4, build.getHandles().size());
    // BULK: offset 0, length 1 -> always channel 0.
    assertEquals(0, build.getHandles().get(0).offset);
    assertEquals(1, build.getHandles().get(0).length);
    assertEquals(EnumSet.of(TransportRequestOptions.Type.BULK), build.getHandles().get(0).getTypes());
    for (int i = 0; i < numIters; i++) {
        assertEquals(0, build.getHandles().get(0).getChannel(list).intValue());
    }
    // STATE/RECOVERY: offset 1, length 2 -> channels {1, 2}.
    assertEquals(1, build.getHandles().get(1).offset);
    assertEquals(2, build.getHandles().get(1).length);
    assertEquals(EnumSet.of(TransportRequestOptions.Type.STATE, TransportRequestOptions.Type.RECOVERY), build.getHandles().get(1).getTypes());
    for (int i = 0; i < numIters; i++) {
        assertThat(build.getHandles().get(1).getChannel(list), Matchers.anyOf(Matchers.is(1), Matchers.is(2)));
    }
    // PING: offset 3, length 3 -> channels {3, 4, 5}.
    assertEquals(3, build.getHandles().get(2).offset);
    assertEquals(3, build.getHandles().get(2).length);
    assertEquals(EnumSet.of(TransportRequestOptions.Type.PING), build.getHandles().get(2).getTypes());
    for (int i = 0; i < numIters; i++) {
        assertThat(build.getHandles().get(2).getChannel(list), Matchers.anyOf(Matchers.is(3), Matchers.is(4), Matchers.is(5)));
    }
    // REG: offset 6, length 4 -> channels {6, 7, 8, 9}.
    assertEquals(6, build.getHandles().get(3).offset);
    assertEquals(4, build.getHandles().get(3).length);
    assertEquals(EnumSet.of(TransportRequestOptions.Type.REG), build.getHandles().get(3).getTypes());
    for (int i = 0; i < numIters; i++) {
        assertThat(build.getHandles().get(3).getChannel(list), Matchers.anyOf(Matchers.is(6), Matchers.is(7), Matchers.is(8), Matchers.is(9)));
    }
    assertEquals(3, build.getNumConnectionsPerType(TransportRequestOptions.Type.PING));
    assertEquals(4, build.getNumConnectionsPerType(TransportRequestOptions.Type.REG));
    assertEquals(2, build.getNumConnectionsPerType(TransportRequestOptions.Type.STATE));
    assertEquals(2, build.getNumConnectionsPerType(TransportRequestOptions.Type.RECOVERY));
    assertEquals(1, build.getNumConnectionsPerType(TransportRequestOptions.Type.BULK));
}
262972.9925129elasticsearch
/**
 * Builds remote-cluster profile settings from a randomized mix of remote-cluster-specific,
 * transport-level and network-level keys, then checks that
 * {@code RemoteClusterPortSettings.buildRemoteAccessProfileSettings} resolves each field to
 * either the explicitly configured value or the documented default.
 * <p>
 * NOTE(review): the order of random draws below matches the seeded sequence of the original
 * and must not be reordered, or seed-based reproduction of failures changes.
 */
public void testPortSettingsConstruction() {
    final String host = NetworkAddress.format(randomIp(true));
    final Settings.Builder settingsBuilder = Settings.builder().put(REMOTE_CLUSTER_SERVER_ENABLED.getKey(), true).put(randomFrom(RemoteClusterPortSettings.HOST, TransportSettings.BIND_HOST, TransportSettings.HOST).getKey(), hostValue(host));
    // Publish host: either set explicitly or expected to fall back to the generic host.
    final boolean hasPublishHost = randomBoolean();
    final String publishHost = hasPublishHost ? NetworkAddress.format(randomIp(true)) : host;
    if (hasPublishHost) {
        settingsBuilder.put(RemoteClusterPortSettings.PUBLISH_HOST.getKey(), publishHost);
    }
    // Bind host: same fallback behavior as publish host.
    final boolean hasBindHost = randomBoolean();
    final String bindHost = hasBindHost ? NetworkAddress.format(randomIp(true)) : host;
    if (hasBindHost) {
        settingsBuilder.put(RemoteClusterPortSettings.BIND_HOST.getKey(), bindHost);
    }
    final boolean hasPort = randomBoolean();
    final int port = hasPort ? randomInt(65535) : RemoteClusterPortSettings.PORT.getDefault(Settings.EMPTY);
    if (hasPort) {
        settingsBuilder.put(RemoteClusterPortSettings.PORT.getKey(), port);
    }
    final boolean hasPublishPort = randomBoolean();
    final int publishPort = hasPublishPort ? randomInt(65535) : -1;
    if (hasPublishPort) {
        settingsBuilder.put(RemoteClusterPortSettings.PUBLISH_PORT.getKey(), publishPort);
    }
    // TCP options may come from any of the three setting scopes; the resolved value must win
    // over the NetworkService default only when explicitly set.
    final boolean hasKeepAlive = randomBoolean();
    final boolean keepAlive = hasKeepAlive ? randomBoolean() : NetworkService.TCP_KEEP_ALIVE.getDefault(Settings.EMPTY);
    if (hasKeepAlive) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_KEEP_ALIVE, TransportSettings.TCP_KEEP_ALIVE, NetworkService.TCP_KEEP_ALIVE).getKey(), keepAlive);
    }
    final boolean hasKeepIdle = randomBoolean();
    final int keepIdle = hasKeepIdle ? randomInt(300) : NetworkService.TCP_KEEP_IDLE.getDefault(Settings.EMPTY);
    if (hasKeepIdle) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_KEEP_IDLE, TransportSettings.TCP_KEEP_IDLE, NetworkService.TCP_KEEP_IDLE).getKey(), keepIdle);
    }
    final boolean hasKeepInterval = randomBoolean();
    final int keepInterval = hasKeepInterval ? randomInt(300) : NetworkService.TCP_KEEP_INTERVAL.getDefault(Settings.EMPTY);
    if (hasKeepInterval) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_KEEP_INTERVAL, TransportSettings.TCP_KEEP_INTERVAL, NetworkService.TCP_KEEP_INTERVAL).getKey(), keepInterval);
    }
    final boolean hasKeepCount = randomBoolean();
    final int keepCount = hasKeepCount ? randomInt(1000000) : NetworkService.TCP_KEEP_COUNT.getDefault(Settings.EMPTY);
    if (hasKeepCount) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_KEEP_COUNT, TransportSettings.TCP_KEEP_COUNT, NetworkService.TCP_KEEP_COUNT).getKey(), keepCount);
    }
    final boolean hasNoDelay = randomBoolean();
    final boolean noDelay = hasNoDelay ? randomBoolean() : NetworkService.TCP_NO_DELAY.getDefault(Settings.EMPTY);
    if (hasNoDelay) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_NO_DELAY, TransportSettings.TCP_NO_DELAY).getKey(), noDelay);
    }
    final boolean hasReuseAddress = randomBoolean();
    final boolean reuseAddress = hasReuseAddress ? randomBoolean() : NetworkService.TCP_REUSE_ADDRESS.getDefault(Settings.EMPTY);
    if (hasReuseAddress) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_REUSE_ADDRESS, TransportSettings.TCP_REUSE_ADDRESS, NetworkService.TCP_REUSE_ADDRESS).getKey(), reuseAddress);
    }
    final boolean hasSendBuffer = randomBoolean();
    final int sendBytes = randomInt(10_000_000);
    final ByteSizeValue sendBuffer = hasSendBuffer ? ByteSizeValue.ofBytes(sendBytes) : NetworkService.TCP_SEND_BUFFER_SIZE.getDefault(Settings.EMPTY);
    if (hasSendBuffer) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_SEND_BUFFER_SIZE, TransportSettings.TCP_SEND_BUFFER_SIZE, NetworkService.TCP_SEND_BUFFER_SIZE).getKey(), sendBuffer);
    }
    final boolean hasReceiveBuffer = randomBoolean();
    final int receiveBytes = randomInt(10_000_000);
    final ByteSizeValue receiveBuffer = hasReceiveBuffer ? ByteSizeValue.ofBytes(receiveBytes) : NetworkService.TCP_RECEIVE_BUFFER_SIZE.getDefault(Settings.EMPTY);
    if (hasReceiveBuffer) {
        settingsBuilder.put(randomFrom(RemoteClusterPortSettings.TCP_RECEIVE_BUFFER_SIZE, TransportSettings.TCP_RECEIVE_BUFFER_SIZE, NetworkService.TCP_RECEIVE_BUFFER_SIZE).getKey(), receiveBuffer);
    }
    final Settings settings = settingsBuilder.build();
    final TcpTransport.ProfileSettings profile = RemoteClusterPortSettings.buildRemoteAccessProfileSettings(settings);
    assertThat(profile.profileName, equalTo(REMOTE_CLUSTER_PROFILE));
    assertThat(profile.bindHosts, contains(bindHost));
    assertThat(profile.publishHosts, contains(publishHost));
    assertThat(profile.portOrRange, equalTo(Integer.toString(port)));
    assertThat(profile.publishPort, equalTo(publishPort));
    assertThat(profile.tcpNoDelay, equalTo(noDelay));
    assertThat(profile.tcpKeepAlive, equalTo(keepAlive));
    assertThat(profile.tcpKeepIdle, equalTo(keepIdle));
    assertThat(profile.tcpKeepInterval, equalTo(keepInterval));
    assertThat(profile.tcpKeepCount, equalTo(keepCount));
    assertThat(profile.reuseAddress, equalTo(reuseAddress));
    assertThat(profile.sendBufferSize, equalTo(sendBuffer));
    assertThat(profile.receiveBufferSize, equalTo(receiveBuffer));
    assertThat(profile.isDefaultProfile, equalTo(false));
}

/** Identity helper kept purely so the builder chain above reads as one expression. */
private static String hostValue(String host) {
    return host;
}
263123.742177elasticsearch
/**
 * Builds the parameterized test cases for the ESQL {@code TO_LONG} conversion function:
 * one supplier per source type (long identity, boolean, datetime, string, double,
 * unsigned long, int, counters), including the out-of-range and unparseable-string cases
 * that are expected to evaluate to null with warnings.
 */
public static Iterable<Object[]> parameters() {
    // The source-field read expression every evaluator wraps.
    String read = "Attribute[channel=0]";
    // Maps a source-type name to the expected evaluator toString, e.g. "ToLongFromIntEvaluator[...]".
    Function<String, String> evaluatorName = s -> "ToLongFrom" + s + "Evaluator[field=" + read + "]";
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // long -> long is the identity; no dedicated evaluator, the read itself is the expression.
    TestCaseSupplier.forUnaryLong(suppliers, read, DataTypes.LONG, l -> l, Long.MIN_VALUE, Long.MAX_VALUE, List.of());
    TestCaseSupplier.forUnaryBoolean(suppliers, evaluatorName.apply("Boolean"), DataTypes.LONG, b -> b ? 1L : 0L, List.of());
    // datetime is stored as epoch millis, so conversion is also an identity at the value level.
    TestCaseSupplier.forUnaryDatetime(suppliers, read, DataTypes.LONG, Instant::toEpochMilli, List.of());
    // Random strings are not numbers: expect null plus the two standard warnings.
    TestCaseSupplier.forUnaryStrings(suppliers, evaluatorName.apply("String"), DataTypes.LONG, bytesRef -> null, bytesRef -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [" + bytesRef.utf8ToString() + "]"));
    // Doubles inside long range round half-up (Math::round); outside the range they are errors.
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.LONG, Math::round, Long.MIN_VALUE, Long.MAX_VALUE, List.of());
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.LONG, d -> null, Double.NEGATIVE_INFINITY, Long.MIN_VALUE - 1d, d -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + d + "] out of [long] range"));
    TestCaseSupplier.forUnaryDouble(suppliers, evaluatorName.apply("Double"), DataTypes.LONG, d -> null, Long.MAX_VALUE + 1d, Double.POSITIVE_INFINITY, d -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + d + "] out of [long] range"));
    // Unsigned longs fit only up to Long.MAX_VALUE; above that they are out of range.
    TestCaseSupplier.forUnaryUnsignedLong(suppliers, evaluatorName.apply("UnsignedLong"), DataTypes.LONG, BigInteger::longValue, BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE), List.of());
    TestCaseSupplier.forUnaryUnsignedLong(suppliers, evaluatorName.apply("UnsignedLong"), DataTypes.LONG, ul -> null, BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE), UNSIGNED_LONG_MAX, ul -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: [" + ul + "] out of [long] range"));
    TestCaseSupplier.forUnaryInt(suppliers, evaluatorName.apply("Int"), DataTypes.LONG, l -> (long) l, Integer.MIN_VALUE, Integer.MAX_VALUE, List.of());
    // Strings rendering valid longs parse exactly; strings rendering in-range doubles parse then round.
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.longCases(Long.MIN_VALUE, Long.MAX_VALUE, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.LONG, bytesRef -> Long.valueOf(((BytesRef) bytesRef).utf8ToString()), List.of());
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.doubleCases(Long.MIN_VALUE, Long.MAX_VALUE, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.LONG, bytesRef -> Math.round(Double.parseDouble(((BytesRef) bytesRef).utf8ToString())), List.of());
    // Strings rendering out-of-range doubles fail with "Cannot parse number" (string path, not double path).
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.doubleCases(Double.NEGATIVE_INFINITY, Long.MIN_VALUE - 1d, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.LONG, bytesRef -> null, bytesRef -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [" + ((BytesRef) bytesRef).utf8ToString() + "]"));
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("String"), TestCaseSupplier.doubleCases(Long.MAX_VALUE + 1d, Double.POSITIVE_INFINITY, true).stream().map(tds -> new TestCaseSupplier.TypedDataSupplier(tds.name() + "as string", () -> new BytesRef(tds.supplier().get().toString()), DataTypes.KEYWORD)).toList(), DataTypes.LONG, bytesRef -> null, bytesRef -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: org.elasticsearch.xpack.ql.InvalidArgumentException: Cannot parse number [" + ((BytesRef) bytesRef).utf8ToString() + "]"));
    // Counter types: long counters pass through; int counters widen to long.
    TestCaseSupplier.unary(suppliers, "Attribute[channel=0]", List.of(new TestCaseSupplier.TypedDataSupplier("counter", ESTestCase::randomNonNegativeLong, EsqlDataTypes.COUNTER_LONG)), DataTypes.LONG, l -> l, List.of());
    TestCaseSupplier.unary(suppliers, evaluatorName.apply("Integer"), List.of(new TestCaseSupplier.TypedDataSupplier("counter", ESTestCase::randomInt, EsqlDataTypes.COUNTER_INTEGER)), DataTypes.LONG, l -> ((Integer) l).longValue(), List.of());
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
}
264976.623149elasticsearch
/**
 * Verifies ML memory-aware allocation: a large anomaly-detection job (~80% of a node's ML
 * memory) and a trained-model deployment (~30%) cannot fit together, so with two ML nodes
 * they land on different nodes; with a single ML node only one of them is assigned (with an
 * "insufficient available memory" explanation on the other); and adding a second node lets
 * both run, again on different nodes.
 * <p>
 * Fix over previous revision: {@code maxNativeBytesPerNode} was overwritten on every loop
 * iteration, so it held the LAST node's value rather than the maximum; it now uses
 * {@link Math#max}.
 */
public void testCluster_GivenAnomalyDetectionJobAndTrainedModelDeployment_ShouldNotAllocateBothOnSameNode() throws Exception {
    internalCluster().ensureAtMostNumDataNodes(0);
    logger.info("Starting dedicated master node...");
    internalCluster().startMasterOnlyNode();
    logger.info("Starting dedicated data node...");
    internalCluster().startDataOnlyNode();
    logger.info("Starting dedicated ml node...");
    internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
    logger.info("Starting dedicated ml node...");
    internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
    ensureStableCluster();
    MlMemoryAction.Response memoryStats = client().execute(MlMemoryAction.INSTANCE, new MlMemoryAction.Request("ml:true")).actionGet();
    // Find the largest per-node ML native memory; job/model sizes below are fractions of it.
    long maxNativeBytesPerNode = 0;
    for (MlMemoryAction.Response.MlMemoryStats stats : memoryStats.getNodes()) {
        maxNativeBytesPerNode = Math.max(maxNativeBytesPerNode, stats.getMlMax().getBytes());
    }
    String jobId = "test-node-goes-down-while-running-job";
    // Job sized at 80% of a node: too big to share a node with the 30% model below.
    Job.Builder job = createJob(jobId, ByteSizeValue.ofBytes((long) (0.8 * maxNativeBytesPerNode)));
    PutJobAction.Request putJobRequest = new PutJobAction.Request(job);
    client().execute(PutJobAction.INSTANCE, putJobRequest).actionGet();
    client().execute(OpenJobAction.INSTANCE, new OpenJobAction.Request(job.getId())).actionGet();
    TrainedModelConfig model = TrainedModelConfig.builder().setModelId("test_model").setModelType(TrainedModelType.PYTORCH).setModelSize((long) (0.3 * maxNativeBytesPerNode)).setInferenceConfig(new PassThroughConfig(new VocabularyConfig(InferenceIndexConstants.nativeDefinitionStore()), null, null)).setLocation(new IndexLocation(InferenceIndexConstants.nativeDefinitionStore())).build();
    // Index a (trivially empty) model definition so the deployment can start.
    TrainedModelDefinitionDoc modelDefinitionDoc = new TrainedModelDefinitionDoc(new BytesArray(""), model.getModelId(), 0, model.getModelSize(), model.getModelSize(), 1, true);
    try (XContentBuilder builder = JsonXContent.contentBuilder()) {
        modelDefinitionDoc.toXContent(builder, null);
        client().execute(TransportIndexAction.TYPE, new IndexRequest(InferenceIndexConstants.nativeDefinitionStore()).source(builder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).actionGet();
    }
    client().execute(PutTrainedModelAction.INSTANCE, new PutTrainedModelAction.Request(model, true)).actionGet();
    client().execute(PutTrainedModelVocabularyAction.INSTANCE, new PutTrainedModelVocabularyAction.Request(model.getModelId(), List.of("these", "are", "my", "words", BertTokenizer.SEPARATOR_TOKEN, BertTokenizer.CLASS_TOKEN, BertTokenizer.UNKNOWN_TOKEN, BertTokenizer.PAD_TOKEN), List.of(), List.of(), false)).actionGet();
    logger.info("starting deployment: " + model.getModelId());
    client().execute(StartTrainedModelDeploymentAction.INSTANCE, new StartTrainedModelDeploymentAction.Request(model.getModelId(), model.getModelId())).actionGet();
    setMlIndicesDelayedNodeLeftTimeoutToZero();
    // With two ML nodes available, the job and the model must be on different nodes.
    String jobNode = client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId())).actionGet().getResponse().results().get(0).getNode().getName();
    String modelNode = client().execute(GetTrainedModelsStatsAction.INSTANCE, new GetTrainedModelsStatsAction.Request(model.getModelId())).actionGet().getResources().results().get(0).getDeploymentStats().getNodeStats().get(0).getNode().getName();
    assertThat(jobNode, not(equalTo(modelNode)));
    logger.info("Stopping both ml nodes...");
    assertThat(internalCluster().stopNode(jobNode), is(true));
    assertThat(internalCluster().stopNode(modelNode), is(true));
    // Both tasks should become unassigned once their nodes are gone.
    assertBusy(() -> {
        GetJobsStatsAction.Response jobStats = client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId())).actionGet();
        assertThat(jobStats.getResponse().results().get(0).getNode(), is(nullValue()));
    });
    assertBusy(() -> {
        GetTrainedModelsStatsAction.Response modelStats = client().execute(GetTrainedModelsStatsAction.INSTANCE, new GetTrainedModelsStatsAction.Request(model.getModelId())).actionGet();
        assertThat(modelStats.getResources().results().get(0).getDeploymentStats().getNodeStats(), is(empty()));
    });
    logger.info("Starting dedicated ml node...");
    String lastMlNodeName = internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
    ensureStableCluster();
    // One node can host either the job or the model, but never both (XOR), and the
    // unassigned one must explain that memory was insufficient.
    assertBusy(() -> {
        GetTrainedModelsStatsAction.Response modelStatsResponse = client().execute(GetTrainedModelsStatsAction.INSTANCE, new GetTrainedModelsStatsAction.Request(model.getModelId())).actionGet();
        GetTrainedModelsStatsAction.Response.TrainedModelStats modelStats = modelStatsResponse.getResources().results().get(0);
        GetJobsStatsAction.Response jobStatsResponse = client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId())).actionGet();
        GetJobsStatsAction.Response.JobStats jobStats = jobStatsResponse.getResponse().results().get(0);
        boolean isModelAssigned = modelStats.getDeploymentStats().getNodeStats().isEmpty() == false;
        boolean isJobAssigned = jobStats.getNode() != null;
        assertThat(isJobAssigned ^ isModelAssigned, is(true));
        if (isJobAssigned) {
            assertThat(jobStats.getNode().getName(), equalTo(lastMlNodeName));
            assertThat(modelStats.getDeploymentStats().getReason(), containsString("insufficient available memory"));
        } else {
            assertThat(modelStats.getDeploymentStats().getNodeStats().get(0).getNode().getName(), equalTo(lastMlNodeName));
            assertThat(jobStats.getAssignmentExplanation(), containsString("insufficient available memory"));
        }
    });
    logger.info("Starting dedicated ml node...");
    internalCluster().startNode(onlyRoles(Set.of(DiscoveryNodeRole.ML_ROLE)));
    ensureStableCluster();
    // With a second node available again, both should be assigned, on different nodes.
    assertBusy(() -> {
        GetTrainedModelsStatsAction.Response modelStatsResponse = client().execute(GetTrainedModelsStatsAction.INSTANCE, new GetTrainedModelsStatsAction.Request(model.getModelId())).actionGet();
        GetTrainedModelsStatsAction.Response.TrainedModelStats modelStats = modelStatsResponse.getResources().results().get(0);
        assertThat(modelStats.getDeploymentStats().getNodeStats().isEmpty(), is(false));
        GetJobsStatsAction.Response jobStatsResponse = client().execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(job.getId())).actionGet();
        GetJobsStatsAction.Response.JobStats jobStats = jobStatsResponse.getResponse().results().get(0);
        assertThat(jobStats.getNode(), is(notNullValue()));
        assertThat(jobStats.getNode(), is(not(equalTo(modelStats.getDeploymentStats().getNodeStats().get(0).getNode()))));
    });
    assertRecentLastTaskStateChangeTime(MlTasks.jobTaskId(jobId), Duration.of(10, ChronoUnit.SECONDS), null);
    client().execute(CloseJobAction.INSTANCE, new CloseJobAction.Request(jobId).setForce(true)).actionGet();
    client().execute(StopTrainedModelDeploymentAction.INSTANCE, new StopTrainedModelDeploymentAction.Request(model.getModelId())).actionGet();
}
262641.281175elasticsearch
/**
 * Exercises eclat pruning when the traversal has to skip back to the next main branch.
 *
 * Twelve documents share identical values for {@code field2}, {@code field2a},
 * {@code field2b} and {@code field2c}; only {@code field1} and {@code field3} vary.
 * The run is expected to produce three frequent item sets with supports 1.0, 0.5
 * and 0.25, plus stable profiling counters.
 */
public void testPruneToNextMainBranch() throws IOException {
    Field field1 = createKeywordFieldTestInstance("keyword1", 0);
    Field field2 = createKeywordFieldTestInstance("keyword2", 1);
    Field field3 = createKeywordFieldTestInstance("keyword3", 2);
    Field field2a = createKeywordFieldTestInstance("keyword2a", 3);
    Field field2b = createKeywordFieldTestInstance("keyword2b", 4);
    Field field2c = createKeywordFieldTestInstance("keyword2c", 5);
    EclatMapReducer eclat = new EclatMapReducer(getTestName(), 0.1, 2, 10, true);
    HashBasedTransactionStore transactionStore = eclat.mapInit(mockBigArrays());
    // Each row holds the per-document values of field1 and field3; every other field is
    // constant across documents. Row order matters: it reproduces the original mapping order.
    String[][] varyingValues = {
        { "f1-a", "f3-A" },
        { "f1-a", "f3-B" },
        { "f1-a", "f3-C" },
        { "f1-a", "f3-D" },
        { "f1-a", "f3-E" },
        { "f1-b", "f3-F" },
        { "f1-b", "f3-G" },
        { "f1-c", "f3-H" },
        { "f1-d", "f3-I" },
        { "f1-b", "f3-J" },
        { "f1-f", "f3-K" },
        { "f1-a", "f3-L" } };
    for (String[] values : varyingValues) {
        eclat.map(
            mockOneDocument(
                List.of(
                    tuple(field1, values[0]),
                    tuple(field2, "f2-1"),
                    tuple(field3, values[1]),
                    tuple(field2a, "f2a-1"),
                    tuple(field2b, "f2b-1"),
                    tuple(field2c, "f2c-1")
                )
            ),
            transactionStore
        );
    }
    EclatMapReducer.EclatResult result = runEclat(eclat, List.of(field1, field2, field3, field2a, field2b, field2c), transactionStore);
    // Three frequent item sets, ordered by decreasing support.
    assertThat(result.getFrequentItemSets().length, equalTo(3));
    assertThat(result.getFrequentItemSets()[0].getSupport(), equalTo(1.0));
    assertThat(result.getFrequentItemSets()[1].getSupport(), equalTo(0.5));
    assertThat(result.getFrequentItemSets()[2].getSupport(), equalTo(0.25));
    // Profiling counters pin the reduce-phase bookkeeping and the eclat search effort.
    assertThat(result.getProfilingInfo().get("unique_items_after_reduce"), equalTo(21L));
    assertThat(result.getProfilingInfo().get("total_transactions_after_reduce"), equalTo(12L));
    assertThat(result.getProfilingInfo().get("total_items_after_reduce"), equalTo(72L));
    assertThat(result.getProfilingInfo().get("item_sets_checked_eclat"), equalTo(47L));
}
264059.442155elasticsearch
/**
 * Verifies the depth-first traversal of {@link ItemSetTraverser} over a small
 * transaction store: visiting order, reported set sizes, and the item/parent bit
 * sets at the points where the traversal extends or backtracks.
 *
 * After pruning, items are ordered by descending frequency; bit positions 1..7 in
 * the bit sets correspond to the items d, b, c, a, e, f, g in that order.
 */
public void testIteration() throws IOException {
    transactionStore = new HashBasedTransactionStore(mockBigArrays());
    Field field = createKeywordFieldTestInstance("field", 0);
    // 10 transactions over the items a..g of a single keyword field.
    transactionStore.add(Stream.of(tuple(field, List.of("a", "d", "f")), tuple(field, List.of("a", "c", "d", "e")), tuple(field, List.of("b", "d")), tuple(field, List.of("b", "c", "d")), tuple(field, List.of("b", "c")), tuple(field, List.of("a", "b", "d")), tuple(field, List.of("b", "d", "e")), tuple(field, List.of("b", "c", "e", "g")), tuple(field, List.of("c", "d", "f")), tuple(field, List.of("a", "b", "d"))));
    // Drop items below 10% support before building the traverser.
    transactionStore.prune(0.1);
    try (TopItemIds topItemIds = transactionStore.getTopItemIds()) {
        ItemSetTraverser it = new ItemSetTraverser(topItemIds);
        // Descend the left-most branch, adding one item per step: d, b, c, a, e, f, g.
        assertTrue(it.next());
        assertEquals("d", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(1, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(1));
        assertTrue(it.next());
        assertEquals("b", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(2, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(2));
        assertTrue(it.next());
        assertEquals("c", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(3, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(3));
        assertTrue(it.next());
        assertEquals("a", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(4, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(4));
        // The parent set is one step behind the current set along the branch.
        assertFalse(it.getParentItemSetBitSet().get(4));
        assertTrue(it.next());
        assertEquals("e", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(5, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(5));
        assertFalse(it.getParentItemSetBitSet().get(5));
        assertTrue(it.getParentItemSetBitSet().get(4));
        assertTrue(it.next());
        assertEquals("f", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(6, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(6));
        assertFalse(it.getParentItemSetBitSet().get(6));
        assertTrue(it.getParentItemSetBitSet().get(5));
        assertTrue(it.next());
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(7, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(7));
        assertFalse(it.getParentItemSetBitSet().get(7));
        assertTrue(it.getParentItemSetBitSet().get(6));
        // Deepest set reached; backtrack: "f" is dropped and sibling "g" is taken at depth 6.
        it.next();
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(6, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(7));
        assertFalse(it.getItemSetBitSet().get(6));
        assertFalse(it.getParentItemSetBitSet().get(6));
        assertFalse(it.getParentItemSetBitSet().get(7));
        // Backtrack again to {d, b, c, a, f}: bits 5 ("e") and 7 ("g") are cleared.
        assertTrue(it.next());
        assertEquals("f", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(5, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(6));
        assertFalse(it.getItemSetBitSet().get(5));
        assertFalse(it.getItemSetBitSet().get(7));
        assertFalse(it.getParentItemSetBitSet().get(5));
        assertFalse(it.getParentItemSetBitSet().get(6));
        // Extend with "g": {d, b, c, a, f, g}.
        assertTrue(it.next());
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(6, it.getNumberOfItems());
        assertTrue(it.getItemSetBitSet().get(7));
        assertTrue(it.getItemSetBitSet().get(6));
        assertFalse(it.getItemSetBitSet().get(5));
        assertTrue(it.getParentItemSetBitSet().get(6));
        assertFalse(it.getParentItemSetBitSet().get(7));
        assertFalse(it.getParentItemSetBitSet().get(5));
        // Continue the depth-first sweep through the remaining combinations of this branch.
        assertTrue(it.next());
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(5, it.getNumberOfItems());
        assertTrue(it.next());
        assertEquals("e", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(4, it.getNumberOfItems());
        assertTrue(it.next());
        assertEquals("f", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(5, it.getNumberOfItems());
        assertTrue(it.next());
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(6, it.getNumberOfItems());
        assertTrue(it.next());
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(5, it.getNumberOfItems());
        // Full bit-set check at {d, b, c, e, g}: bit 4 ("a") and bit 6 ("f") are absent.
        assertTrue(it.getItemSetBitSet().get(1));
        assertTrue(it.getItemSetBitSet().get(2));
        assertTrue(it.getItemSetBitSet().get(3));
        assertFalse(it.getItemSetBitSet().get(4));
        assertTrue(it.getItemSetBitSet().get(5));
        assertFalse(it.getItemSetBitSet().get(6));
        assertTrue(it.getItemSetBitSet().get(7));
        assertTrue(it.getParentItemSetBitSet().get(1));
        assertTrue(it.getParentItemSetBitSet().get(2));
        assertTrue(it.getParentItemSetBitSet().get(3));
        assertFalse(it.getParentItemSetBitSet().get(4));
        assertTrue(it.getParentItemSetBitSet().get(5));
        assertFalse(it.getParentItemSetBitSet().get(6));
        assertFalse(it.getParentItemSetBitSet().get(7));
        assertTrue(it.next());
        assertEquals("f", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(4, it.getNumberOfItems());
        assertTrue(it.next());
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(5, it.getNumberOfItems());
        assertTrue(it.next());
        assertEquals("g", transactionStore.getItem(it.getItemId()).v2());
        assertEquals(4, it.getNumberOfItems());
        // Full bit-set check at {d, b, c, g}: only bits 1, 2, 3 and 7 are set.
        assertTrue(it.getItemSetBitSet().get(1));
        assertTrue(it.getItemSetBitSet().get(2));
        assertTrue(it.getItemSetBitSet().get(3));
        assertFalse(it.getItemSetBitSet().get(4));
        assertFalse(it.getItemSetBitSet().get(5));
        assertFalse(it.getItemSetBitSet().get(6));
        assertTrue(it.getItemSetBitSet().get(7));
        assertTrue(it.getParentItemSetBitSet().get(1));
        assertTrue(it.getParentItemSetBitSet().get(2));
        assertTrue(it.getParentItemSetBitSet().get(3));
        assertFalse(it.getParentItemSetBitSet().get(4));
        assertFalse(it.getParentItemSetBitSet().get(5));
        assertFalse(it.getParentItemSetBitSet().get(6));
        assertFalse(it.getParentItemSetBitSet().get(7));
        // Exhaust the traversal: exactly 109 further steps remain after this point.
        int furtherSteps = 0;
        while (it.next()) {
            ++furtherSteps;
        }
        assertEquals(109, furtherSteps);
    }
}
262442.421185elasticsearch
/**
 * Tests {@link MlAutoscalingResourceTracker#checkIfOneNodeCouldBeRemoved} along the
 * memory dimension only: all processor requirements are zero, node memory is 600 or
 * 1000 bytes, and the dummy autoscaling entity contributes nothing.
 */
public void testCheckIfOneNodeCouldBeRemovedMemoryOnly() {
    // Removable: node_c's five tiny jobs (5 x 10) fit into the remaining free memory of node_a/node_b.
    assertTrue(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Map.of("node_a", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(200L, 0), MlJobRequirements.of(300L, 0)), "node_b", List.of(MlJobRequirements.of(200L, 0), MlJobRequirements.of(300L, 0)), "node_c", List.of(MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0))), 600L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
    // Not removable: node_a is full (600/600) and node_b nearly full (580/600) — no headroom.
    assertFalse(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Map.of("node_a", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(200L, 0), MlJobRequirements.of(300L, 0)), "node_b", List.of(MlJobRequirements.of(280L, 0), MlJobRequirements.of(300L, 0)), "node_c", List.of(MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0))), 600L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
    // Not removable: a single node can never be removed.
    assertFalse(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Map.of("node_a", List.of(MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0))), 600L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
    // Not removable: no ML nodes at all.
    assertFalse(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Collections.emptyMap(), 999L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
    // Not removable: node_c's jobs (including a 500-byte one) do not all fit elsewhere.
    assertFalse(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Map.of("node_a", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(200L, 0), MlJobRequirements.of(300L, 0)), "node_b", List.of(MlJobRequirements.of(280L, 0), MlJobRequirements.of(300L, 0)), "node_c", List.of(MlJobRequirements.of(500L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0))), 1000L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
    // Removable: node_c's workload can be spread across the free space on node_a and node_b.
    assertTrue(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Map.of("node_a", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(200L, 0), MlJobRequirements.of(300L, 0)), "node_b", List.of(MlJobRequirements.of(280L, 0), MlJobRequirements.of(300L, 0)), "node_c", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0))), 1000L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
    // Removable: as above, with a slightly larger mix that still fits.
    assertTrue(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Map.of("node_a", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(200L, 0), MlJobRequirements.of(300L, 0)), "node_b", List.of(MlJobRequirements.of(280L, 0), MlJobRequirements.of(300L, 0)), "node_c", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(50L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0))), 1000L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
    // Removable: node_b is fuller (280 + 325) but the spare capacity still absorbs node_c.
    assertTrue(MlAutoscalingResourceTracker.checkIfOneNodeCouldBeRemoved(Map.of("node_a", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(200L, 0), MlJobRequirements.of(300L, 0)), "node_b", List.of(MlJobRequirements.of(280L, 0), MlJobRequirements.of(325L, 0)), "node_c", List.of(MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(100L, 0), MlJobRequirements.of(50L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0), MlJobRequirements.of(10L, 0))), 1000L, 10, MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE, MlDummyAutoscalingEntity.of(0L, 0)));
}
262743.2225124elasticsearch
/**
 * Ordinal-based positioning is not supported by this terms enumeration.
 *
 * @param ord the target term ordinal (ignored)
 * @throws UnsupportedOperationException always — callers must seek by term instead
 */
public void seekExact(long ord) {
    throw new UnsupportedOperationException("seekExact(ord) is not supported");
}
262743.2225124elasticsearch
/**
 * Ordinal-based positioning is not supported by this terms enumeration.
 *
 * @param ord the target term ordinal (ignored)
 * @throws UnsupportedOperationException always — callers must seek by term instead
 */
public void seekExact(long ord) {
    throw new UnsupportedOperationException("seekExact(ord) is not supported");
}
262743.2225124elasticsearch
/**
 * Ordinal-based positioning is not supported by this terms enumeration.
 *
 * @param ord the target term ordinal (ignored)
 * @throws UnsupportedOperationException always — callers must seek by term instead
 */
public void seekExact(long ord) {
    throw new UnsupportedOperationException("seekExact(ord) is not supported");
}
263542.2415142elasticsearch
/**
 * Verifies that a snapshot taken of a searchable-snapshot index contains no data
 * files (only metadata), can still be restored, survives a full cluster restart,
 * and — unlike a restore — cannot itself be mounted as a searchable snapshot.
 */
public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() throws Exception {
    // Create and populate the source index, and record the expected hit counts.
    final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createAndPopulateIndex(indexName, Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_SOFT_DELETES_SETTING.getKey(), true));
    final TotalHits originalAllHits = SearchResponseUtils.getTotalHits(internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true));
    final TotalHits originalBarHits = SearchResponseUtils.getTotalHits(internalCluster().client().prepareSearch(indexName).setTrackTotalHits(true).setQuery(matchQuery("foo", "bar")));
    logger.info("--> [{}] in total, of which [{}] match the query", originalAllHits, originalBarHits);
    final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final boolean hasRepositoryUuid = randomBoolean();
    if (hasRepositoryUuid) {
        createRepository(repositoryName, "fs");
    } else {
        // Seed the repository with an older snapshot metadata format so that it has no
        // repository UUID, then re-register it under the final name at the same path.
        final String tmpRepositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
        createRepositoryNoVerify(tmpRepositoryName, "fs");
        final Path repoPath = internalCluster().getCurrentMasterNodeInstance(Environment.class).resolveRepoFile(clusterAdmin().prepareGetRepositories(tmpRepositoryName).get().repositories().get(0).settings().get("location"));
        initWithSnapshotVersion(tmpRepositoryName, repoPath, randomFrom(SnapshotsService.OLD_SNAPSHOT_FORMAT, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION, SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION));
        assertAcked(clusterAdmin().prepareDeleteRepository(tmpRepositoryName));
        createRepository(repositoryName, "fs", repoPath);
    }
    // snapshot-1: an ordinary snapshot of the source index — its shards contain real files.
    final SnapshotId snapshotOne = createSnapshot(repositoryName, "snapshot-1", List.of(indexName)).snapshotId();
    for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(repositoryName).setSnapshots(snapshotOne.getName()).get().getSnapshots()) {
        for (final SnapshotIndexShardStatus snapshotIndexShardStatus : snapshotStatus.getShards()) {
            final SnapshotStats stats = snapshotIndexShardStatus.getStats();
            assertThat(stats.getIncrementalFileCount(), greaterThan(1));
            assertThat(stats.getProcessedFileCount(), greaterThan(1));
            assertThat(stats.getTotalFileCount(), greaterThan(1));
        }
    }
    // Delete the source index and mount snapshot-1 as a searchable-snapshot index.
    assertAcked(indicesAdmin().prepareDelete(indexName));
    assertThat(clusterAdmin().prepareGetRepositories(repositoryName).get().repositories().get(0).uuid(), hasRepositoryUuid ? not(equalTo(RepositoryData.MISSING_UUID)) : equalTo(RepositoryData.MISSING_UUID));
    final String restoredIndexName = randomValueOtherThan(indexName, () -> randomAlphaOfLength(10).toLowerCase(Locale.ROOT));
    mountSnapshot(repositoryName, snapshotOne.getName(), indexName, restoredIndexName, Settings.EMPTY);
    ensureGreen(restoredIndexName);
    if (randomBoolean()) {
        logger.info("--> closing index before snapshot");
        assertAcked(indicesAdmin().prepareClose(restoredIndexName));
    }
    // snapshot-2 targets the mounted index, in either the same repository or a fresh one.
    final String backupRepositoryName;
    if (randomBoolean()) {
        backupRepositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
        createRepository(backupRepositoryName, "fs");
    } else {
        backupRepositoryName = repositoryName;
    }
    logger.info("--> starting to take snapshot-2");
    final SnapshotId snapshotTwo = createSnapshot(backupRepositoryName, "snapshot-2", List.of(restoredIndexName)).snapshotId();
    logger.info("--> finished taking snapshot-2");
    // A snapshot of a searchable-snapshot index must contain no data files at all.
    for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(backupRepositoryName).setSnapshots(snapshotTwo.getName()).get().getSnapshots()) {
        assertThat(snapshotStatus.getIndices().size(), equalTo(1));
        assertTrue(snapshotStatus.getIndices().containsKey(restoredIndexName));
        for (final SnapshotIndexShardStatus snapshotIndexShardStatus : snapshotStatus.getShards()) {
            final SnapshotStats stats = snapshotIndexShardStatus.getStats();
            assertThat(stats.getIncrementalFileCount(), equalTo(0));
            assertThat(stats.getProcessedFileCount(), equalTo(0));
            assertThat(stats.getTotalFileCount(), equalTo(0));
        }
    }
    assertAcked(indicesAdmin().prepareDelete(restoredIndexName));
    // Optionally re-register the repository under a new name (possibly read-only) before restoring,
    // exercising repository-UUID-based lookup of the backing snapshot.
    final String restoreRepositoryName;
    if (hasRepositoryUuid && randomBoolean()) {
        final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(repositoryName).get().repositories().get(0);
        final String newRepositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
        assertAcked(clusterAdmin().prepareDeleteRepository(repositoryName));
        final Settings.Builder settings = Settings.builder().put(repositoryMetadata.settings());
        if (randomBoolean()) {
            settings.put(READONLY_SETTING_KEY, "true");
        }
        assertAcked(clusterAdmin().preparePutRepository(newRepositoryName).setType("fs").setSettings(settings).setVerify(randomBoolean()));
        restoreRepositoryName = backupRepositoryName.equals(repositoryName) ? newRepositoryName : backupRepositoryName;
    } else {
        restoreRepositoryName = backupRepositoryName;
    }
    // Restoring snapshot-2 must bring back a fully working searchable-snapshot index.
    logger.info("--> starting to restore snapshot-2");
    assertThat(clusterAdmin().prepareRestoreSnapshot(restoreRepositoryName, snapshotTwo.getName()).setIndices(restoredIndexName).get().status(), equalTo(RestStatus.ACCEPTED));
    ensureGreen(restoredIndexName);
    logger.info("--> finished restoring snapshot-2");
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    // The restored index must also survive a full cluster restart.
    internalCluster().fullRestart();
    ensureGreen(restoredIndexName);
    assertTotalHits(restoredIndexName, originalAllHits, originalBarHits);
    // Mounting snapshot-2 itself is rejected: it is a snapshot of a searchable-snapshot index.
    final IllegalArgumentException remountException = expectThrows(IllegalArgumentException.class, () -> {
        try {
            mountSnapshot(restoreRepositoryName, snapshotTwo.getName(), restoredIndexName, randomAlphaOfLength(10).toLowerCase(Locale.ROOT), Settings.EMPTY);
        } catch (Exception e) {
            // Unwrap so the assertion below sees the IllegalArgumentException regardless of transport wrapping.
            final Throwable cause = ExceptionsHelper.unwrap(e, IllegalArgumentException.class);
            throw cause == null ? e : cause;
        }
    });
    assertThat(remountException.getMessage(), allOf(containsString("is a snapshot of a searchable snapshot index backed by index"), containsString(repositoryName), containsString(snapshotOne.getName()), containsString(indexName), containsString(restoreRepositoryName), containsString(snapshotTwo.getName()), containsString(restoredIndexName), containsString("cannot be mounted; did you mean to restore it instead?")));
}
264596.0611130elasticsearch
/**
 * Builds SP metadata via {@link SamlMetadataCommand} with default CLI options while the
 * realm is randomly configured with a signing keystore and/or an encryption keystore
 * holding two key pairs, then verifies the generated entity descriptor: ACS, requested
 * attribute, single-logout service, and the expected set of key descriptors.
 */
public void testDefaultOptionsWithSigningAndMultipleEncryptionKeys() throws Exception {
    assumeFalse("Can't run in a FIPS JVM, PKCS12 keystores are not usable", inFipsJvm());
    final KeyStoreWrapper usedKeyStore = randomFrom(keyStore, passwordProtectedKeystore);
    final Path dir = createTempDir();
    // PKCS12 keystore holding two encryption key pairs (RSA 2048 and RSA 4096).
    final Path ksEncryptionFile = dir.resolve("saml-encryption.p12");
    final Tuple<java.security.cert.X509Certificate, PrivateKey> certEncKeyPair1 = readKeyPair("RSA_2048");
    final Tuple<java.security.cert.X509Certificate, PrivateKey> certEncKeyPair2 = readKeyPair("RSA_4096");
    final KeyStore ksEncrypt = KeyStore.getInstance("PKCS12");
    ksEncrypt.load(null);
    ksEncrypt.setKeyEntry(getAliasName(certEncKeyPair1), certEncKeyPair1.v2(), "key-password".toCharArray(), new Certificate[] { certEncKeyPair1.v1() });
    ksEncrypt.setKeyEntry(getAliasName(certEncKeyPair2), certEncKeyPair2.v2(), "key-password".toCharArray(), new Certificate[] { certEncKeyPair2.v1() });
    try (OutputStream out = Files.newOutputStream(ksEncryptionFile)) {
        ksEncrypt.store(out, "ks-password".toCharArray());
    }
    // PKCS12 keystore holding a single signing key pair.
    final Path ksSigningFile = dir.resolve("saml-signing.p12");
    final Tuple<java.security.cert.X509Certificate, PrivateKey> certKeyPairSign = readRandomKeyPair("RSA");
    final KeyStore ksSign = KeyStore.getInstance("PKCS12");
    ksSign.load(null);
    ksSign.setKeyEntry(getAliasName(certKeyPairSign), certKeyPairSign.v2(), "key-password".toCharArray(), new Certificate[] { certKeyPairSign.v1() });
    try (OutputStream out = Files.newOutputStream(ksSigningFile)) {
        ksSign.store(out, "ks-password".toCharArray());
    }
    // Keystore and key passwords are delivered through secure settings.
    final MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.secure_password", "ks-password");
    secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.secure_key_password", "key-password");
    secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.secure_password", "ks-password");
    secureSettings.setString(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.secure_key_password", "key-password");
    final SamlMetadataCommand command = new SamlMetadataCommand((e) -> usedKeyStore);
    final OptionSet options = command.getParser().parse(new String[0]);
    // Randomly enable the signing and/or encryption credentials on the realm.
    final boolean useSigningCredentials = randomBoolean();
    final boolean useEncryptionCredentials = randomBoolean();
    final Settings.Builder settingsBuilder = Settings.builder().put("path.home", dir).put(RealmSettings.PREFIX + "saml.my_saml.type", "saml").put(RealmSettings.PREFIX + "saml.my_saml.order", 1).put(RealmSettings.PREFIX + "saml.my_saml.idp.entity_id", "https://okta.my.corp/").put(RealmSettings.PREFIX + "saml.my_saml.sp.entity_id", "https://kibana.my.corp/").put(RealmSettings.PREFIX + "saml.my_saml.sp.acs", "https://kibana.my.corp/saml/login").put(RealmSettings.PREFIX + "saml.my_saml.sp.logout", "https://kibana.my.corp/saml/logout").put(RealmSettings.PREFIX + "saml.my_saml.attributes.principal", "urn:oid:0.9.2342.19200300.100.1.1");
    settingsBuilder.setSecureSettings(secureSettings);
    if (useSigningCredentials) {
        settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.path", ksSigningFile.toString());
        settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.signing.keystore.type", "PKCS12");
    }
    if (useEncryptionCredentials) {
        settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.path", ksEncryptionFile.toString());
        settingsBuilder.put(RealmSettings.PREFIX + "saml.my_saml.encryption.keystore.type", "PKCS12");
    }
    final Settings settings = settingsBuilder.build();
    final Environment env = TestEnvironment.newEnvironment(settings);
    final MockTerminal terminal = getTerminalPossiblyWithPassword(usedKeyStore);
    terminal.addTextInput("");
    final EntityDescriptor descriptor = command.buildEntityDescriptor(terminal, options, env);
    // Basic SP descriptor checks: entity id, ACS, requested attribute and single logout.
    assertThat(descriptor, notNullValue());
    assertThat(descriptor.getEntityID(), equalTo("https://kibana.my.corp/"));
    assertThat(descriptor.getRoleDescriptors(), iterableWithSize(1));
    assertThat(descriptor.getRoleDescriptors().get(0), instanceOf(SPSSODescriptor.class));
    final SPSSODescriptor spDescriptor = (SPSSODescriptor) descriptor.getRoleDescriptors().get(0);
    assertThat(spDescriptor.getAssertionConsumerServices(), iterableWithSize(1));
    assertThat(spDescriptor.getAssertionConsumerServices().get(0).getLocation(), equalTo("https://kibana.my.corp/saml/login"));
    assertThat(spDescriptor.getAssertionConsumerServices().get(0).isDefault(), equalTo(true));
    assertThat(spDescriptor.getAssertionConsumerServices().get(0).getIndex(), equalTo(1));
    assertThat(spDescriptor.getAssertionConsumerServices().get(0).getBinding(), equalTo(SAMLConstants.SAML2_POST_BINDING_URI));
    assertThat(spDescriptor.getAttributeConsumingServices(), iterableWithSize(1));
    assertThat(spDescriptor.getAttributeConsumingServices().get(0).isDefault(), equalTo(true));
    assertThat(spDescriptor.getAttributeConsumingServices().get(0).getIndex(), equalTo(1));
    assertThat(spDescriptor.getAttributeConsumingServices().get(0).getRequestedAttributes(), iterableWithSize(1));
    final RequestedAttribute uidAttribute = spDescriptor.getAttributeConsumingServices().get(0).getRequestedAttributes().get(0);
    assertThat(uidAttribute.getName(), equalTo("urn:oid:0.9.2342.19200300.100.1.1"));
    assertThat(uidAttribute.getFriendlyName(), equalTo("principal"));
    assertThat(spDescriptor.getSingleLogoutServices(), iterableWithSize(1));
    assertThat(spDescriptor.getSingleLogoutServices().get(0).getLocation(), equalTo("https://kibana.my.corp/saml/logout"));
    assertThat(spDescriptor.getSingleLogoutServices().get(0).getBinding(), equalTo(SAMLConstants.SAML2_REDIRECT_BINDING_URI));
    assertThat(spDescriptor.isAuthnRequestsSigned(), equalTo(useSigningCredentials));
    assertThat(spDescriptor.getWantAssertionsSigned(), equalTo(true));
    // Expect one key descriptor for signing plus two for encryption, per the random setup.
    int expectedKeyDescriptorSize = (useSigningCredentials) ? 1 : 0;
    expectedKeyDescriptorSize = (useEncryptionCredentials) ? expectedKeyDescriptorSize + 2 : expectedKeyDescriptorSize;
    assertThat(spDescriptor.getKeyDescriptors(), iterableWithSize(expectedKeyDescriptorSize));
    if (expectedKeyDescriptorSize > 0) {
        // Each encryption certificate must appear exactly once; remove-on-match enforces that.
        final Set<java.security.cert.X509Certificate> encryptionCertificatesToMatch = new HashSet<>();
        if (useEncryptionCredentials) {
            encryptionCertificatesToMatch.add(certEncKeyPair1.v1());
            encryptionCertificatesToMatch.add(certEncKeyPair2.v1());
        }
        spDescriptor.getKeyDescriptors().forEach((keyDesc) -> {
            UsageType usageType = keyDesc.getUse();
            final List<X509Data> x509 = keyDesc.getKeyInfo().getX509Datas();
            assertThat(x509, iterableWithSize(1));
            assertThat(x509.get(0).getX509Certificates(), iterableWithSize(1));
            final X509Certificate xmlCert = x509.get(0).getX509Certificates().get(0);
            final java.security.cert.X509Certificate javaCert;
            try {
                // Convert the XML representation back into a java.security certificate for comparison.
                javaCert = KeyInfoSupport.getCertificate(xmlCert);
            } catch (CertificateException ce) {
                throw ExceptionsHelper.convertToRuntime(ce);
            }
            if (usageType == UsageType.SIGNING) {
                assertTrue("Found UsageType as SIGNING in SP metadata when not testing for signing credentials", useSigningCredentials);
                assertEquals("Signing Certificate from SP metadata does not match", certKeyPairSign.v1(), javaCert);
            } else if (usageType == UsageType.ENCRYPTION) {
                assertTrue(useEncryptionCredentials);
                assertTrue("Encryption Certificate was not found in encryption certificates", encryptionCertificatesToMatch.remove(javaCert));
            } else {
                fail("Usage type should have been either SIGNING or ENCRYPTION");
            }
        });
        if (useEncryptionCredentials) {
            assertTrue("Did not find all encryption certificates in exported SP metadata", encryptionCertificatesToMatch.isEmpty());
        }
    }
}
261613.3238130gwt
/**
 * Returns a {@link DateTimeFormat} for the given pattern, consulting the static
 * cache when the supplied format info is the default one.
 *
 * Only formats built against the default {@code DateTimeFormatInfo} are cached;
 * a custom info always produces (and never stores) a fresh instance.
 */
protected static DateTimeFormat getFormat(String pattern, DateTimeFormatInfo dtfi) {
    final DateTimeFormatInfo defaultInfo = getDefaultDateTimeFormatInfo();
    final boolean cacheable = dtfi == defaultInfo;
    DateTimeFormat format = cacheable ? cache.get(pattern) : null;
    if (format == null) {
        format = new DateTimeFormat(pattern, dtfi);
        if (cacheable) {
            cache.put(pattern, format);
        }
    }
    return format;
}
261613.3238130gwt
/**
 * Returns a {@link DateTimeFormat} for the given pattern, consulting the static
 * cache when the supplied format info is the default one.
 *
 * Only formats built against the default {@code DateTimeFormatInfo} are cached;
 * a custom info always produces (and never stores) a fresh instance.
 */
protected static DateTimeFormat getFormat(String pattern, DateTimeFormatInfo dtfi) {
    final DateTimeFormatInfo defaultInfo = getDefaultDateTimeFormatInfo();
    final boolean cacheable = dtfi == defaultInfo;
    DateTimeFormat format = cacheable ? cache.get(pattern) : null;
    if (format == null) {
        format = new DateTimeFormat(pattern, dtfi);
        if (cacheable) {
            cache.put(pattern, format);
        }
    }
    return format;
}
261613.3238130gwt
/**
 * Returns a {@link DateTimeFormat} for the given pattern, consulting the static
 * cache when the supplied format info is the default one.
 *
 * Only formats built against the default {@code DateTimeFormatInfo} are cached;
 * a custom info always produces (and never stores) a fresh instance.
 */
protected static DateTimeFormat getFormat(String pattern, DateTimeFormatInfo dtfi) {
    final DateTimeFormatInfo defaultInfo = getDefaultDateTimeFormatInfo();
    final boolean cacheable = dtfi == defaultInfo;
    DateTimeFormat format = cacheable ? cache.get(pattern) : null;
    if (format == null) {
        format = new DateTimeFormat(pattern, dtfi);
        if (cacheable) {
            cache.put(pattern, format);
        }
    }
    return format;
}
261516.3238130gwt
/**
 * Returns a {@code DateTimeFormat} for the given pattern, reusing cached
 * instances when the caller passes the default locale's format info.
 *
 * @param pattern the date/time pattern to compile
 * @param dtfi the locale format info to build with
 * @return a format for {@code pattern}; shared via the cache only when
 *     {@code dtfi} is the default format info instance
 */
protected static DateTimeFormat getFormat(String pattern, DateTimeFormatInfo dtfi) {
    DateTimeFormatInfo defaultDtfi = getDefaultDateTimeFormatInfo();
    // Only formats built against the default locale info are cacheable;
    // identity comparison matches the original contract.
    boolean cacheable = (dtfi == defaultDtfi);
    DateTimeFormat dtf = cacheable ? cache.get(pattern) : null;
    if (dtf != null) {
        return dtf;
    }
    dtf = new DateTimeFormat(pattern, dtfi);
    if (cacheable) {
        cache.put(pattern, dtf);
    }
    return dtf;
}
261516.3238130gwt
/**
 * Returns a {@code DateTimeFormat} for the given pattern, reusing cached
 * instances when the caller passes the default locale's format info.
 *
 * @param pattern the date/time pattern to compile
 * @param dtfi the locale format info to build with
 * @return a format for {@code pattern}; shared via the cache only when
 *     {@code dtfi} is the default format info instance
 */
protected static DateTimeFormat getFormat(String pattern, DateTimeFormatInfo dtfi) {
    DateTimeFormatInfo defaultDtfi = getDefaultDateTimeFormatInfo();
    // Only formats built against the default locale info are cacheable;
    // identity comparison matches the original contract.
    boolean cacheable = (dtfi == defaultDtfi);
    DateTimeFormat dtf = cacheable ? cache.get(pattern) : null;
    if (dtf != null) {
        return dtf;
    }
    dtf = new DateTimeFormat(pattern, dtfi);
    if (cacheable) {
        cache.put(pattern, dtf);
    }
    return dtf;
}
261516.3238130gwt
/**
 * Returns a {@code DateTimeFormat} for the given pattern, reusing cached
 * instances when the caller passes the default locale's format info.
 *
 * @param pattern the date/time pattern to compile
 * @param dtfi the locale format info to build with
 * @return a format for {@code pattern}; shared via the cache only when
 *     {@code dtfi} is the default format info instance
 */
protected static DateTimeFormat getFormat(String pattern, DateTimeFormatInfo dtfi) {
    DateTimeFormatInfo defaultDtfi = getDefaultDateTimeFormatInfo();
    // Only formats built against the default locale info are cacheable;
    // identity comparison matches the original contract.
    boolean cacheable = (dtfi == defaultDtfi);
    DateTimeFormat dtf = cacheable ? cache.get(pattern) : null;
    if (dtf != null) {
        return dtf;
    }
    dtf = new DateTimeFormat(pattern, dtfi);
    if (cacheable) {
        cache.put(pattern, dtf);
    }
    return dtf;
}
262498.425134hadoop
/**
 * Services delegation-token management operations (GETDELEGATIONTOKEN,
 * RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN) embedded in the request.
 *
 * @param token the already-established authentication token, may be null
 * @param request the HTTP request possibly carrying an "op" parameter
 * @param response the HTTP response; status/body are written here when a
 *     management operation is fully handled by this method
 * @return {@code true} if normal request processing should continue,
 *     {@code false} if the request was fully handled (or rejected) here
 * @throws IOException on I/O failure writing the response
 * @throws AuthenticationException if token creation/renewal fails, wrapping
 *     the underlying IOException
 */
public boolean managementOperation(AuthenticationToken token, HttpServletRequest request, HttpServletResponse response) throws IOException, AuthenticationException {
    boolean requestContinues = true;
    LOG.trace("Processing operation for req=({}), token: {}", request, token);
    String op = ServletUtils.getParameter(request, KerberosDelegationTokenAuthenticator.OP_PARAM);
    // Normalize to upper case so the enum valueOf below matches regardless of
    // how the client spelled the op parameter.
    op = (op != null) ? StringUtils.toUpperCase(op) : null;
    if (isManagementOperation(request)) {
        KerberosDelegationTokenAuthenticator.DelegationTokenOperation dtOp = KerberosDelegationTokenAuthenticator.DelegationTokenOperation.valueOf(op);
        if (dtOp.getHttpMethod().equals(request.getMethod())) {
            boolean doManagement;
            // Some operations require Kerberos credentials; if no token was
            // supplied, try to authenticate now. A null result means the
            // auth handler already wrote a challenge response.
            if (dtOp.requiresKerberosCredentials() && token == null) {
                token = authHandler.authenticate(request, response);
                LOG.trace("Got token: {}.", token);
                if (token == null) {
                    requestContinues = false;
                    doManagement = false;
                } else {
                    doManagement = true;
                }
            } else {
                doManagement = true;
            }
            if (doManagement) {
                UserGroupInformation requestUgi = (token != null) ? UserGroupInformation.createRemoteUser(token.getUserName()) : null;
                String doAsUser = DelegationTokenAuthenticationFilter.getDoAs(request);
                // Honor doAs impersonation, but only if proxy-user policy
                // authorizes the authenticated caller for the remote address.
                if (requestUgi != null && doAsUser != null) {
                    requestUgi = UserGroupInformation.createProxyUser(doAsUser, requestUgi);
                    try {
                        ProxyUsers.authorize(requestUgi, request.getRemoteAddr());
                    } catch (AuthorizationException ex) {
                        HttpExceptionUtils.createServletExceptionResponse(response, HttpServletResponse.SC_FORBIDDEN, ex);
                        return false;
                    }
                }
                // Holds the JSON payload to return, if the operation produces one.
                Map map = null;
                switch(dtOp) {
                    case GETDELEGATIONTOKEN:
                        if (requestUgi == null) {
                            throw new IllegalStateException("request UGI cannot be NULL");
                        }
                        String renewer = ServletUtils.getParameter(request, KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
                        String service = ServletUtils.getParameter(request, KerberosDelegationTokenAuthenticator.SERVICE_PARAM);
                        try {
                            Token<?> dToken = tokenManager.createToken(requestUgi, renewer, service);
                            map = delegationTokenToJSON(dToken);
                        } catch (IOException ex) {
                            throw new AuthenticationException(ex.toString(), ex);
                        }
                        break;
                    case RENEWDELEGATIONTOKEN:
                        if (requestUgi == null) {
                            throw new IllegalStateException("request UGI cannot be NULL");
                        }
                        String tokenToRenew = ServletUtils.getParameter(request, KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
                        if (tokenToRenew == null) {
                            response.sendError(HttpServletResponse.SC_BAD_REQUEST, MessageFormat.format("Operation [{0}] requires the parameter [{1}]", dtOp, KerberosDelegationTokenAuthenticator.TOKEN_PARAM));
                            requestContinues = false;
                        } else {
                            Token<AbstractDelegationTokenIdentifier> dt = new Token();
                            try {
                                dt.decodeFromUrlString(tokenToRenew);
                                long expirationTime = tokenManager.renewToken(dt, requestUgi.getShortUserName());
                                // The response body is {"long": <newExpirationTime>}.
                                map = Collections.singletonMap("long", expirationTime);
                            } catch (IOException ex) {
                                throw new AuthenticationException(ex.toString(), ex);
                            }
                        }
                        break;
                    case CANCELDELEGATIONTOKEN:
                        String tokenToCancel = ServletUtils.getParameter(request, KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
                        if (tokenToCancel == null) {
                            response.sendError(HttpServletResponse.SC_BAD_REQUEST, MessageFormat.format("Operation [{0}] requires the parameter [{1}]", dtOp, KerberosDelegationTokenAuthenticator.TOKEN_PARAM));
                            requestContinues = false;
                        } else {
                            Token<AbstractDelegationTokenIdentifier> dt = new Token();
                            try {
                                dt.decodeFromUrlString(tokenToCancel);
                                // Cancel may proceed without a UGI (anonymous cancel);
                                // a decode failure is reported as 404 rather than rethrown.
                                tokenManager.cancelToken(dt, (requestUgi != null) ? requestUgi.getShortUserName() : null);
                            } catch (IOException ex) {
                                response.sendError(HttpServletResponse.SC_NOT_FOUND, "Invalid delegation token, cannot cancel");
                                requestContinues = false;
                            }
                        }
                        break;
                }
                // If nothing above failed/short-circuited, the operation was
                // handled here: emit 200 (+ JSON body when present) and stop
                // further processing of this request.
                if (requestContinues) {
                    response.setStatus(HttpServletResponse.SC_OK);
                    if (map != null) {
                        response.setContentType(MediaType.APPLICATION_JSON);
                        Writer writer = response.getWriter();
                        ObjectMapper jsonMapper = new ObjectMapper(jsonFactory);
                        jsonMapper.writeValue(writer, map);
                        // Trailing newline terminates the JSON document.
                        writer.write(ENTER);
                        writer.flush();
                    }
                    requestContinues = false;
                }
            }
        } else {
            response.sendError(HttpServletResponse.SC_BAD_REQUEST, MessageFormat.format("Wrong HTTP method [{0}] for operation [{1}], it should be " + "[{2}]", request.getMethod(), dtOp, dtOp.getHttpMethod()));
            requestContinues = false;
        }
    }
    return requestContinues;
}
261488.6212186hadoop
/**
 * Tests xattr operations through the /.reserved/raw path namespace:
 * basic get/set/list/remove via the raw path, visibility of raw xattrs
 * only through raw paths, and access-control enforcement for non-superusers.
 * NOTE(review): rawPath is presumably the /.reserved/raw-prefixed twin of
 * {@code path} (set up outside this method) — confirm against the fixture.
 */
public void testRawXAttrs() throws Exception {
    final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    // Raw xattr set via the raw path is readable via the raw path.
    {
        final byte[] value = fs.getXAttr(rawPath, raw1);
        Assert.assertArrayEquals(value, value1);
    }
    // getXAttrs on the raw path returns exactly the one raw xattr.
    {
        final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
        Assert.assertEquals(xattrs.size(), 1);
        Assert.assertArrayEquals(value1, xattrs.get(raw1));
        fs.removeXAttr(rawPath, raw1);
    }
    // CREATE then REPLACE updates the value in place.
    {
        fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
        fs.setXAttr(rawPath, raw1, newValue1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
        final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
        Assert.assertEquals(xattrs.size(), 1);
        Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
        fs.removeXAttr(rawPath, raw1);
    }
    // listXAttrs on the raw path sees both raw xattrs.
    {
        fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
        fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
        final List<String> xattrNames = fs.listXAttrs(rawPath);
        assertTrue(xattrNames.contains(raw1));
        assertTrue(xattrNames.contains(raw2));
        assertTrue(xattrNames.size() == 2);
        fs.removeXAttr(rawPath, raw1);
        fs.removeXAttr(rawPath, raw2);
    }
    // Raw xattrs are invisible when listing through the non-raw path.
    {
        fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
        fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
        final List<String> xattrNames = fs.listXAttrs(path);
        assertTrue(xattrNames.size() == 0);
        fs.removeXAttr(rawPath, raw1);
        fs.removeXAttr(rawPath, raw2);
    }
    // A non-superuser may not set or get raw xattrs via either path.
    {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                try {
                    userFs.setXAttr(path, raw1, value1);
                    fail("setXAttr should have thrown");
                } catch (AccessControlException e) {
                }
                try {
                    userFs.setXAttr(rawPath, raw1, value1);
                    fail("setXAttr should have thrown");
                } catch (AccessControlException e) {
                }
                try {
                    userFs.getXAttrs(rawPath);
                    fail("getXAttrs should have thrown");
                } catch (AccessControlException e) {
                }
                try {
                    userFs.getXAttrs(path);
                    fail("getXAttrs should have thrown");
                } catch (AccessControlException e) {
                }
                try {
                    userFs.getXAttr(rawPath, raw1);
                    fail("getXAttr should have thrown");
                } catch (AccessControlException e) {
                }
                try {
                    userFs.getXAttr(path, raw1);
                    fail("getXAttr should have thrown");
                } catch (AccessControlException e) {
                }
                return null;
            }
        });
    }
    // Even with an existing raw xattr, a non-superuser still cannot read or
    // list it — but after opening up permissions on a child dir, listXAttrs
    // on the raw child path succeeds for that user.
    {
        fs.setXAttr(rawPath, raw1, value1);
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                try {
                    userFs.getXAttr(rawPath, raw1);
                    fail("getXAttr should have thrown");
                } catch (AccessControlException e) {
                }
                try {
                    userFs.getXAttr(path, raw1);
                    fail("getXAttr should have thrown");
                } catch (AccessControlException e) {
                }
                try {
                    userFs.listXAttrs(path);
                    fail("listXAttr should have thrown AccessControlException");
                } catch (AccessControlException ace) {
                }
                try {
                    userFs.listXAttrs(rawPath);
                    fail("listXAttr should have thrown AccessControlException");
                } catch (AccessControlException ace) {
                }
                return null;
            }
        });
        fs.setPermission(path, new FsPermission((short) 0751));
        final Path childDir = new Path(path, "child" + pathCount);
        FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0704));
        final Path rawChildDir = new Path("/.reserved/raw" + childDir.toString());
        fs.setXAttr(rawChildDir, raw1, value1);
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                List<String> xattrs = userFs.listXAttrs(rawChildDir);
                assertEquals(1, xattrs.size());
                assertEquals(raw1, xattrs.get(0));
                return null;
            }
        });
        fs.removeXAttr(rawPath, raw1);
    }
    // A user-owned file with a superuser-set raw xattr: the owner can read
    // it through the raw path, but an unrelated user cannot read through
    // the plain path.
    {
        Path parentPath = new Path("/foo");
        fs.mkdirs(parentPath);
        fs.setOwner(parentPath, "user", "mygroup");
        fs.setPermission(parentPath, new FsPermission("701"));
        Path childPath = new Path("/foo/bar");
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final DistributedFileSystem dfs = dfsCluster.getFileSystem();
                DFSTestUtil.createFile(dfs, childPath, 1024, (short) 1, 0xFEED);
                dfs.setPermission(childPath, new FsPermission("740"));
                return null;
            }
        });
        Path rawChildPath = new Path("/.reserved/raw" + childPath.toString());
        fs.setXAttr(new Path("/.reserved/raw/foo/bar"), raw1, value1);
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final DistributedFileSystem dfs = dfsCluster.getFileSystem();
                byte[] xattr = dfs.getXAttr(rawChildPath, raw1);
                assertEquals(Arrays.toString(value1), Arrays.toString(xattr));
                return null;
            }
        });
        final UserGroupInformation fakeUser = UserGroupInformation.createUserForTesting("fakeUser", new String[] { "fakeGroup" });
        fakeUser.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final DistributedFileSystem dfs = dfsCluster.getFileSystem();
                try {
                    dfs.getXAttr(path, raw1);
                    fail("should have thrown AccessControlException");
                } catch (AccessControlException ace) {
                }
                return null;
            }
        });
    }
}
261999.4321149hadoop
/**
 * Exercises cache pool add/modify/remove through both the DFS client and the
 * raw protocol, verifying that invalid inputs (duplicate, empty, null,
 * non-existent names) and operations on a closed filesystem all fail with
 * the expected exception messages.
 */
public void testBasicPoolOperations() throws Exception {
    final String poolName = "pool1";
    CachePoolInfo info = new CachePoolInfo(poolName).setOwnerName("bob").setGroupName("bobgroup").setMode(new FsPermission((short) 0755)).setLimit(150l);
    dfs.addCachePool(info);
    // Adding the same pool twice must fail.
    try {
        dfs.addCachePool(info);
        fail("added the pool with the same name twice");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("pool1 already exists", ioe);
    }
    // Empty and null pool names are rejected via the DFS client...
    try {
        dfs.addCachePool(new CachePoolInfo(""));
        fail("added empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        dfs.addCachePool(null);
        fail("added null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    // ...and via the raw protocol interface as well.
    try {
        proto.addCachePool(new CachePoolInfo(""));
        fail("added empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.addCachePool(null);
        fail("added null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    // Modifying an existing pool succeeds; invalid targets fail.
    info.setOwnerName("jane").setGroupName("janegroup").setMode(new FsPermission((short) 0700)).setLimit(314l);
    dfs.modifyCachePool(info);
    try {
        dfs.modifyCachePool(new CachePoolInfo("fool"));
        fail("modified non-existent cache pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("fool does not exist", ioe);
    }
    try {
        dfs.modifyCachePool(new CachePoolInfo(""));
        fail("modified empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        dfs.modifyCachePool(null);
        fail("modified null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    try {
        proto.modifyCachePool(new CachePoolInfo(""));
        fail("modified empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.modifyCachePool(null);
        fail("modified null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("CachePoolInfo is null", ioe);
    }
    // Removal: a second removal of the same pool behaves like removing a
    // non-existent pool.
    dfs.removeCachePool(poolName);
    try {
        dfs.removeCachePool("pool99");
        fail("expected to get an exception when " + "removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove " + "non-existent cache pool", ioe);
    }
    try {
        dfs.removeCachePool(poolName);
        fail("expected to get an exception when " + "removing a non-existent pool.");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Cannot remove " + "non-existent cache pool", ioe);
    }
    try {
        dfs.removeCachePool("");
        fail("removed empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        dfs.removeCachePool(null);
        fail("removed null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.removeCachePool("");
        fail("removed empty pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    try {
        proto.removeCachePool(null);
        fail("removed null pool");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("invalid empty cache pool name", ioe);
    }
    // All pool operations on a closed filesystem instance must fail with
    // "Filesystem closed".
    info = new CachePoolInfo("pool2");
    dfs.addCachePool(info);
    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster.getNewFileSystemInstance(0);
    dfs1.close();
    try {
        dfs1.listCachePools();
        fail("listCachePools using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.addCachePool(info);
        fail("addCachePool using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.modifyCachePool(info);
        fail("modifyCachePool using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
    try {
        dfs1.removeCachePool(poolName);
        fail("removeCachePool using a closed filesystem!");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
}
263621.516149hadoop
/**
 * Runs a battery of NameNode/DataNode upgrade scenarios over storage
 * configurations with 1 and 2 directories (plus a final 4-directory pass),
 * checking both the successful upgrade paths and the expected failures
 * (pre-existing previous dirs, missing edits/image files, corrupt or
 * incompatible VERSION files, future/old layout versions).
 */
public void testUpgrade() throws Exception {
    File[] baseDirs;
    StorageInfo storageInfo = null;
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
        conf = new HdfsConfiguration();
        conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
        String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
        String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
        conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
        log("Normal NameNode upgrade", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        // A rolling-upgrade PREPARE right after a regular upgrade is expected
        // to be rejected with InconsistentFSStateException.
        try {
            final DistributedFileSystem dfs = cluster.getFileSystem();
            dfs.setSafeMode(SafeModeAction.ENTER);
            dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
            fail();
        } catch (RemoteException re) {
            assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
            LOG.info("The exception is expected.", re);
        }
        checkNameNode(nameNodeDirs, EXPECTED_TXID);
        if (numDirs > 1)
            TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("Normal DataNode upgrade", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
        checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // An existing "previous" dir must block a NameNode upgrade...
        log("NameNode upgrade with existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // ...but a DataNode with a pre-existing "previous" dir still starts.
        log("DataNode upgrade with existing previous dir", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
        checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Integer.MIN_VALUE layout version simulates a version from the future.
        log("DataNode upgrade with future stored layout version in current", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        storageInfo = new StorageInfo(Integer.MIN_VALUE, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
        startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Long.MAX_VALUE fsscTime simulates storage newer than the NameNode's.
        log("DataNode upgrade with newer fsscTime in current", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
        storageInfo = new StorageInfo(DataNodeLayoutVersion.getCurrentLayoutVersion(), UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE, NodeType.DATA_NODE);
        UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster), conf);
        startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities.getCurrentBlockPoolID(null));
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        UpgradeUtilities.createEmptyDirs(dataNodeDirs);
        // Missing edits or image files must abort the NameNode upgrade.
        log("NameNode upgrade with no edits file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        deleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        log("NameNode upgrade with no image file", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        deleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // Corrupt the VERSION file's layoutVersion key in every storage dir.
        log("NameNode upgrade with corrupt version file", numDirs);
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        for (File f : baseDirs) {
            UpgradeUtilities.corruptFile(new File(f, "VERSION"), "layoutVersion".getBytes(StandardCharsets.UTF_8), "xxxxxxxxxxxxx".getBytes(StandardCharsets.UTF_8));
        }
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // A layout version older than the last upgradable one is rejected.
        log("NameNode upgrade with old layout version in current", numDirs);
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1, UpgradeUtilities.getCurrentNamespaceID(null), UpgradeUtilities.getCurrentClusterID(null), UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
        UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(null));
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
        // A layout version from the future is likewise rejected.
        log("NameNode upgrade with future layout version in current", numDirs);
        baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        storageInfo = new StorageInfo(Integer.MIN_VALUE, UpgradeUtilities.getCurrentNamespaceID(null), UpgradeUtilities.getCurrentClusterID(null), UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
        UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(null));
        startNameNodeShouldFail(StartupOption.UPGRADE);
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    }
    // Final pass: a larger 4-directory configuration, upgrade + image check.
    int numDirs = 4;
    {
        conf = new HdfsConfiguration();
        conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
        conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
        conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
        String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
        log("Normal NameNode upgrade", numDirs);
        UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
        cluster = createCluster();
        try {
            final DistributedFileSystem dfs = cluster.getFileSystem();
            dfs.setSafeMode(SafeModeAction.ENTER);
            dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
            fail();
        } catch (RemoteException re) {
            assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
            LOG.info("The exception is expected.", re);
        }
        checkNameNode(nameNodeDirs, EXPECTED_TXID);
        TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
        cluster.shutdown();
        UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    }
}
264120.9623120hadoop
/**
 * Builds a diamond-shaped dependency graph of four copy jobs (job_1 and
 * job_2 independent; job_3 depends on both; job_4 depends on job_3), runs
 * them under a {@link JobControl}, and verifies that every job reaches a
 * terminal state and that dependency failures propagate correctly.
 *
 * @throws Exception if any job ends in an unexpected or inconsistent state
 */
public static void doJobControlTest() throws Exception {
    Configuration defaults = new Configuration();
    FileSystem fs = FileSystem.get(defaults);
    Path rootDataDir = new Path(System.getProperty("test.build.data", "."), "TestJobControlData");
    Path indir = new Path(rootDataDir, "indir");
    Path outdir_1 = new Path(rootDataDir, "outdir_1");
    Path outdir_2 = new Path(rootDataDir, "outdir_2");
    Path outdir_3 = new Path(rootDataDir, "outdir_3");
    Path outdir_4 = new Path(rootDataDir, "outdir_4");
    // Start from a clean slate: regenerate input data, clear all output dirs.
    JobControlTestUtils.cleanData(fs, indir);
    JobControlTestUtils.generateData(fs, indir);
    JobControlTestUtils.cleanData(fs, outdir_1);
    JobControlTestUtils.cleanData(fs, outdir_2);
    JobControlTestUtils.cleanData(fs, outdir_3);
    JobControlTestUtils.cleanData(fs, outdir_4);
    ArrayList<Job> dependingJobs = null;
    // job_1 and job_2 copy the input independently (no dependencies).
    ArrayList<Path> inPaths_1 = new ArrayList<Path>();
    inPaths_1.add(indir);
    JobConf jobConf_1 = JobControlTestUtils.createCopyJob(inPaths_1, outdir_1);
    Job job_1 = new Job(jobConf_1, dependingJobs);
    ArrayList<Path> inPaths_2 = new ArrayList<Path>();
    inPaths_2.add(indir);
    JobConf jobConf_2 = JobControlTestUtils.createCopyJob(inPaths_2, outdir_2);
    Job job_2 = new Job(jobConf_2, dependingJobs);
    // job_3 consumes the outputs of job_1 and job_2.
    ArrayList<Path> inPaths_3 = new ArrayList<Path>();
    inPaths_3.add(outdir_1);
    inPaths_3.add(outdir_2);
    JobConf jobConf_3 = JobControlTestUtils.createCopyJob(inPaths_3, outdir_3);
    dependingJobs = new ArrayList<Job>();
    dependingJobs.add(job_1);
    dependingJobs.add(job_2);
    Job job_3 = new Job(jobConf_3, dependingJobs);
    // job_4 consumes the output of job_3.
    ArrayList<Path> inPaths_4 = new ArrayList<Path>();
    inPaths_4.add(outdir_3);
    JobConf jobConf_4 = JobControlTestUtils.createCopyJob(inPaths_4, outdir_4);
    dependingJobs = new ArrayList<Job>();
    dependingJobs.add(job_3);
    Job job_4 = new Job(jobConf_4, dependingJobs);
    JobControl theControl = new JobControl("Test");
    theControl.addJob((ControlledJob) job_1);
    theControl.addJob((ControlledJob) job_2);
    theControl.addJob(job_3);
    theControl.addJob(job_4);
    Thread theController = new Thread(theControl);
    theController.start();
    // Poll until the controller reports all jobs finished.
    while (!theControl.allFinished()) {
        System.out.println("Jobs in waiting state: " + theControl.getWaitingJobs().size());
        System.out.println("Jobs in ready state: " + theControl.getReadyJobs().size());
        System.out.println("Jobs in running state: " + theControl.getRunningJobs().size());
        System.out.println("Jobs in success state: " + theControl.getSuccessfulJobs().size());
        System.out.println("Jobs in failed state: " + theControl.getFailedJobs().size());
        System.out.println("\n");
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of silently swallowing it.
            Thread.currentThread().interrupt();
        }
    }
    System.out.println("Jobs are all done???");
    System.out.println("Jobs in waiting state: " + theControl.getWaitingJobs().size());
    System.out.println("Jobs in ready state: " + theControl.getReadyJobs().size());
    System.out.println("Jobs in running state: " + theControl.getRunningJobs().size());
    System.out.println("Jobs in success state: " + theControl.getSuccessfulJobs().size());
    System.out.println("Jobs in failed state: " + theControl.getFailedJobs().size());
    System.out.println("\n");
    // Every job must end in a terminal state: FAILED, DEPENDENT_FAILED or SUCCESS.
    if (job_1.getState() != Job.FAILED && job_1.getState() != Job.DEPENDENT_FAILED && job_1.getState() != Job.SUCCESS) {
        String states = "job_1:  " + job_1.getState() + "\n";
        throw new Exception("The state of job_1 is not in a complete state\n" + states);
    }
    if (job_2.getState() != Job.FAILED && job_2.getState() != Job.DEPENDENT_FAILED && job_2.getState() != Job.SUCCESS) {
        String states = "job_2:  " + job_2.getState() + "\n";
        throw new Exception("The state of job_2 is not in a complete state\n" + states);
    }
    if (job_3.getState() != Job.FAILED && job_3.getState() != Job.DEPENDENT_FAILED && job_3.getState() != Job.SUCCESS) {
        String states = "job_3:  " + job_3.getState() + "\n";
        throw new Exception("The state of job_3 is not in a complete state\n" + states);
    }
    if (job_4.getState() != Job.FAILED && job_4.getState() != Job.DEPENDENT_FAILED && job_4.getState() != Job.SUCCESS) {
        String states = "job_4:  " + job_4.getState() + "\n";
        throw new Exception("The state of job_4 is not in a complete state\n" + states);
    }
    // If either parent of job_3 failed, job_3 must be DEPENDENT_FAILED.
    if (job_1.getState() == Job.FAILED || job_2.getState() == Job.FAILED || job_1.getState() == Job.DEPENDENT_FAILED || job_2.getState() == Job.DEPENDENT_FAILED) {
        if (job_3.getState() != Job.DEPENDENT_FAILED) {
            // FIX: append with += — the previous code reassigned `states` on
            // each line, so the diagnostic only showed job_4's state.
            String states = "job_1:  " + job_1.getState() + "\n";
            states += "job_2:  " + job_2.getState() + "\n";
            states += "job_3:  " + job_3.getState() + "\n";
            states += "job_4:  " + job_4.getState() + "\n";
            throw new Exception("The states of jobs 1, 2, 3, 4 are not consistent\n" + states);
        }
    }
    // If job_3 failed, job_4 must be DEPENDENT_FAILED.
    if (job_3.getState() == Job.FAILED || job_3.getState() == Job.DEPENDENT_FAILED) {
        if (job_4.getState() != Job.DEPENDENT_FAILED) {
            // FIX: append with += (same overwrite bug as above).
            String states = "job_3:  " + job_3.getState() + "\n";
            states += "job_4:  " + job_4.getState() + "\n";
            throw new Exception("The states of jobs 3, 4 are not consistent\n" + states);
        }
    }
    theControl.stop();
}
264057.931157hadoop
/**
 * Verifies that {@code KeyFieldHelper.parseOption(String)} normalizes every
 * supported "-k" key-spec form — explicit field.char ranges, global and
 * per-key "n"/"r" modifiers, modifiers attached to either the start or the
 * end position, and bare "-n"/"-r"/"-nr" with no explicit key — into the
 * canonical "-k&lt;from&gt;,&lt;to&gt;[n][r]" representation.
 */
public void testparseOption() throws Exception {
    // Canonical spec round-trips unchanged.
    assertKeySpecs("-k1.2,3.4", "-k1.2,3.4");
    // A missing end position defaults to 0.0 (note the space after -k).
    assertKeySpecs("-k 1.2", "-k1.2,0.0");
    // A global modifier applies to a key that carries none of its own...
    assertKeySpecs("-nr -k1.2,3.4", "-k1.2,3.4nr");
    // ...but a key's own modifier overrides the global one.
    assertKeySpecs("-nr -k1.2,3.4n", "-k1.2,3.4n");
    assertKeySpecs("-nr -k1.2,3.4r", "-k1.2,3.4r");
    assertKeySpecs("-nr -k1.2,3.4 -k5.6,7.8n -k9.10,11.12r -k13.14,15.16nr",
        "-k1.2,3.4nr", "-k5.6,7.8n", "-k9.10,11.12r", "-k13.14,15.16nr");
    // Modifiers written on the start position migrate to the end of the spec.
    assertKeySpecs("-k1.2n,3.4", "-k1.2,3.4n");
    assertKeySpecs("-k1.2r,3.4", "-k1.2,3.4r");
    assertKeySpecs("-k1.2nr,3.4", "-k1.2,3.4nr");
    // Modifiers on the end position are already in canonical form.
    assertKeySpecs("-k1.2,3.4n", "-k1.2,3.4n");
    assertKeySpecs("-k1.2,3.4r", "-k1.2,3.4r");
    assertKeySpecs("-k1.2,3.4nr", "-k1.2,3.4nr");
    // A global modifier is inherited by every key that has no modifier.
    assertKeySpecs("-nr -k1.2,3.4 -k5.6,7.8", "-k1.2,3.4nr", "-k5.6,7.8nr");
    assertKeySpecs("-n -k1.2,3.4 -k5.6,7.8", "-k1.2,3.4n", "-k5.6,7.8n");
    assertKeySpecs("-r -k1.2,3.4 -k5.6,7.8", "-k1.2,3.4r", "-k5.6,7.8r");
    // A per-key modifier does NOT leak onto the following key.
    assertKeySpecs("-k1.2,3.4n -k5.6,7.8", "-k1.2,3.4n", "-k5.6,7.8");
    assertKeySpecs("-k1.2,3.4r -k5.6,7.8", "-k1.2,3.4r", "-k5.6,7.8");
    assertKeySpecs("-k1.2,3.4nr -k5.6,7.8", "-k1.2,3.4nr", "-k5.6,7.8");
    // A bare modifier implies the default key "-k1.1,0.0".
    assertKeySpecs("-n", "-k1.1,0.0n");
    assertKeySpecs("-r", "-k1.1,0.0r");
    assertKeySpecs("-nr", "-k1.1,0.0nr");
}

/**
 * Parses {@code keySpecs} with a fresh {@code KeyFieldHelper} and asserts
 * that the parsed key specs, in order, stringify to {@code expected}.
 *
 * The original test only configured the tab separator for its first case and
 * the remaining cases passed without it, so the parse results do not depend
 * on the separator; it is set uniformly here for consistency.
 */
private static void assertKeySpecs(String keySpecs, String... expected) {
    KeyFieldHelper helper = new KeyFieldHelper();
    helper.setKeyFieldSeparator("\t");
    helper.parseOption(keySpecs);
    for (int i = 0; i < expected.length; i++) {
        assertEquals("KeyFieldHelper's parsing is garbled", expected[i],
            helper.keySpecs().get(i).toString());
    }
}
262744.151174hadoop
/**
 * Parameter matrix for the parameterized flush/close output-stream tests.
 *
 * Each row carries 7 positional values consumed by the test constructor.
 * Based on the patterns visible in the rows themselves:
 *   [0] test name (String);
 *   [1] true for "OptmON_" rows, false for "OptmOFF_" rows — presumably
 *       toggles the small-write optimization under test; confirm against the
 *       consuming constructor;
 *   [2] true for "CloseTest" rows, false for "FlushCloseTest" rows;
 *   [3] initial file size in bytes: 0 for "EmptyFile", 2 * TEST_BUFFER_SIZE
 *       for "NonEmptyFile";
 *   [4] bytes written per write call (buffer-sized, half, quarter, buffer
 *       plus a quarter, or 0);
 *   [5] number of write calls (1 or 3);
 *   [6] boolean that is true only on "OptmON" rows whose writes stay below
 *       the buffer size — presumably the expected "optimization engaged"
 *       outcome; verify against the assertions in the test body.
 */
public static Iterable<Object[]> params() {
    return Arrays.asList(new Object[][] { { "OptmON_FlushCloseTest_EmptyFile_BufferSizeWrite", true, false, 0, TEST_BUFFER_SIZE, 1, false }, { "OptmON_FlushCloseTest_NonEmptyFile_BufferSizeWrite", true, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false }, { "OptmON_CloseTest_EmptyFile_BufferSizeWrite", true, true, 0, TEST_BUFFER_SIZE, 1, false }, { "OptmON_CloseTest_NonEmptyFile_BufferSizeWrite", true, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false }, { "OptmOFF_FlushCloseTest_EmptyFile_BufferSizeWrite", false, false, 0, TEST_BUFFER_SIZE, 1, false }, { "OptmOFF_FlushCloseTest_NonEmptyFile_BufferSizeWrite", false, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false }, { "OptmOFF_CloseTest_EmptyFile_BufferSizeWrite", false, true, 0, TEST_BUFFER_SIZE, 1, false }, { "OptmOFF_CloseTest_NonEmptyFile_BufferSizeWrite", false, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 1, false }, { "OptmON_FlushCloseTest_EmptyFile_LessThanBufferSizeWrite", true, false, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, true }, { "OptmON_FlushCloseTest_NonEmptyFile_LessThanBufferSizeWrite", true, false, 2 * TEST_BUFFER_SIZE, Math.abs(HALF_TEST_BUFFER_SIZE), 1, true }, { "OptmON_CloseTest_EmptyFile_LessThanBufferSizeWrite", true, true, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, true }, { "OptmON_CloseTest_NonEmptyFile_LessThanBufferSizeWrite", true, true, 2 * TEST_BUFFER_SIZE, Math.abs(HALF_TEST_BUFFER_SIZE), 1, true }, { "OptmOFF_FlushCloseTest_EmptyFile_LessThanBufferSizeWrite", false, false, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, false }, { "OptmOFF_FlushCloseTest_NonEmptyFile_LessThanBufferSizeWrite", false, false, 2 * TEST_BUFFER_SIZE, Math.abs(HALF_TEST_BUFFER_SIZE), 1, false }, { "OptmOFF_CloseTest_EmptyFile_LessThanBufferSizeWrite", false, true, 0, Math.abs(HALF_TEST_BUFFER_SIZE), 1, false }, { "OptmOFF_CloseTest_NonEmptyFile_LessThanBufferSizeWrite", false, true, 2 * TEST_BUFFER_SIZE, Math.abs(HALF_TEST_BUFFER_SIZE), 1, false }, { 
"OptmON_FlushCloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", true, false, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true }, { "OptmON_FlushCloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", true, false, 2 * TEST_BUFFER_SIZE, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true }, { "OptmON_CloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", true, true, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true }, { "OptmON_CloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", true, true, 2 * TEST_BUFFER_SIZE, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, true }, { "OptmOFF_FlushCloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", false, false, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmOFF_FlushCloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", false, false, 2 * TEST_BUFFER_SIZE, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmOFF_CloseTest_EmptyFile_MultiSmallWritesStillLessThanBufferSize", false, true, 0, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmOFF_CloseTest_NonEmptyFile_MultiSmallWritesStillLessThanBufferSize", false, true, 2 * TEST_BUFFER_SIZE, Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmON_FlushCloseTest_EmptyFile_MultiBufferSizeWrite", true, false, 0, TEST_BUFFER_SIZE, 3, false }, { "OptmON_FlushCloseTest_NonEmptyFile_MultiBufferSizeWrite", true, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false }, { "OptmON_CloseTest_EmptyFile_MultiBufferSizeWrite", true, true, 0, TEST_BUFFER_SIZE, 3, false }, { "OptmON_CloseTest_NonEmptyFile_MultiBufferSizeWrite", true, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false }, { "OptmOFF_FlushCloseTest_EmptyFile_MultiBufferSizeWrite", false, false, 0, TEST_BUFFER_SIZE, 3, false }, { "OptmOFF_FlushCloseTest_NonEmptyFile_MultiBufferSizeWrite", false, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false }, { "OptmOFF_CloseTest_EmptyFile_MultiBufferSizeWrite", false, true, 0, TEST_BUFFER_SIZE, 3, false }, { 
"OptmOFF_CloseTest_NonEmptyFile_MultiBufferSizeWrite", false, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE, 3, false }, { "OptmON_FlushCloseTest_EmptyFile_BufferAndExtraWrite", true, false, 0, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmON_FlushCloseTest_NonEmptyFile_BufferAndExtraWrite", true, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmON_CloseTest_EmptyFile__BufferAndExtraWrite", true, true, 0, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmON_CloseTest_NonEmptyFile_BufferAndExtraWrite", true, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmOFF_FlushCloseTest_EmptyFile_BufferAndExtraWrite", false, false, 0, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmOFF_FlushCloseTest_NonEmptyFile_BufferAndExtraWrite", false, false, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmOFF_CloseTest_EmptyFile_BufferAndExtraWrite", false, true, 0, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmOFF_CloseTest_NonEmptyFile_BufferAndExtraWrite", false, true, 2 * TEST_BUFFER_SIZE, TEST_BUFFER_SIZE + Math.abs(QUARTER_TEST_BUFFER_SIZE), 3, false }, { "OptmON_FlushCloseTest_EmptyFile_0ByteWrite", true, false, 0, 0, 1, false }, { "OptmON_FlushCloseTest_NonEmptyFile_0ByteWrite", true, false, 2 * TEST_BUFFER_SIZE, 0, 1, false }, { "OptmON_CloseTest_EmptyFile_0ByteWrite", true, true, 0, 0, 1, false }, { "OptmON_CloseTest_NonEmptyFile_0ByteWrite", true, true, 2 * TEST_BUFFER_SIZE, 0, 1, false }, { "OptmOFF_FlushCloseTest_EmptyFile_0ByteWrite", false, false, 0, 0, 1, false }, { "OptmOFF_FlushCloseTest_NonEmptyFile_0ByteWrite", false, false, 2 * TEST_BUFFER_SIZE, 0, 1, false }, { "OptmOFF_CloseTest_EmptyFile_0ByteWrite", false, true, 0, 0, 1, false }, { "OptmOFF_CloseTest_NonEmptyFile_0ByteWrite", false, true, 2 * 
TEST_BUFFER_SIZE, 0, 1, false } });
}
262930.9824127hadoop
/**
 * Replays one parsed audit-log entry against the target cluster, acting as
 * the user recorded in the entry, and records per-command latency counters.
 *
 * @param command the audit entry (command name, source/dest paths, user).
 * @return true if the command executed successfully; false if the command
 *         was unsupported/invalid or the filesystem call threw IOException.
 */
private boolean replayLog(final AuditReplayCommand command) {
    final String src = command.getSrc();
    final String dst = command.getDest();
    // Lazily create and cache one proxy FileSystem per distinct user.
    FileSystem proxyFs = fsCache.get(command.getSimpleUgi());
    if (proxyFs == null) {
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(command.getSimpleUgi(), loginUser);
        proxyFs = ugi.doAs((PrivilegedAction<FileSystem>) () -> {
            try {
                FileSystem fs = new DistributedFileSystem();
                fs.initialize(namenodeUri, mapperConf);
                return fs;
            } catch (IOException ioe) {
                // doAs actions can't throw checked exceptions; rewrap.
                throw new RuntimeException(ioe);
            }
        });
        fsCache.put(command.getSimpleUgi(), proxyFs);
    }
    final FileSystem fs = proxyFs;
    ReplayCommand replayCommand;
    try {
        // The audit "command" field may carry trailing tokens; only the
        // first word names the operation.
        replayCommand = ReplayCommand.valueOf(command.getCommand().split(" ")[0].toUpperCase());
    } catch (IllegalArgumentException iae) {
        LOG.warn("Unsupported/invalid command: " + command);
        replayCountersMap.get(REPLAYCOUNTERS.TOTALUNSUPPORTEDCOMMANDS).increment(1);
        return false;
    }
    try {
        long startTime = System.currentTimeMillis();
        switch (replayCommand) {
            case CREATE:
                // try-with-resources: the previous explicit close() leaked
                // the stream when the optional writeByte threw.
                try (FSDataOutputStream fsDos = fs.create(new Path(src))) {
                    if (createBlocks) {
                        fsDos.writeByte(0);
                    }
                }
                break;
            case GETFILEINFO:
                fs.getFileStatus(new Path(src));
                break;
            case CONTENTSUMMARY:
                fs.getContentSummary(new Path(src));
                break;
            case MKDIRS:
                fs.mkdirs(new Path(src));
                break;
            case RENAME:
                fs.rename(new Path(src), new Path(dst));
                break;
            case LISTSTATUS:
                ((DistributedFileSystem) fs).getClient().listPaths(src, HdfsFileStatus.EMPTY_NAME);
                break;
            case APPEND:
                // FIX: the stream returned by append() was never closed
                // (resource leak) and the early "return true" skipped the
                // latency/counter accounting every other command receives.
                fs.append(new Path(src)).close();
                break;
            case DELETE:
                fs.delete(new Path(src), true);
                break;
            case OPEN:
                fs.open(new Path(src)).close();
                break;
            case SETPERMISSION:
                fs.setPermission(new Path(src), FsPermission.getDefault());
                break;
            case SETOWNER:
                fs.setOwner(new Path(src), UserGroupInformation.getCurrentUser().getShortUserName(), UserGroupInformation.getCurrentUser().getPrimaryGroupName());
                break;
            case SETTIMES:
                fs.setTimes(new Path(src), System.currentTimeMillis(), System.currentTimeMillis());
                break;
            case SETREPLICATION:
                fs.setReplication(new Path(src), (short) 1);
                break;
            case CONCAT:
                // dst arrives bracketed, e.g. "[a, b, c]"; strip the
                // brackets then split on commas.
                String bareDist = dst.length() < 2 ? "" : dst.substring(1, dst.length() - 1).trim();
                List<Path> dsts = new ArrayList<>();
                for (String s : Splitter.on(",").omitEmptyStrings().trimResults().split(bareDist)) {
                    dsts.add(new Path(s));
                }
                fs.concat(new Path(src), dsts.toArray(new Path[] {}));
                break;
            default:
                throw new RuntimeException("Unexpected command: " + replayCommand);
        }
        long latency = System.currentTimeMillis() - startTime;
        // Per-user-per-command latency accumulation.
        UserCommandKey userCommandKey = new UserCommandKey(command.getSimpleUgi(), replayCommand.toString(), replayCommand.getType().toString());
        commandLatencyMap.putIfAbsent(userCommandKey, new CountTimeWritable());
        CountTimeWritable latencyWritable = commandLatencyMap.get(userCommandKey);
        latencyWritable.setCount(latencyWritable.getCount() + 1);
        latencyWritable.setTime(latencyWritable.getTime() + latency);
        // Aggregate read/write totals.
        switch (replayCommand.getType()) {
            case WRITE:
                replayCountersMap.get(REPLAYCOUNTERS.TOTALWRITECOMMANDLATENCY).increment(latency);
                replayCountersMap.get(REPLAYCOUNTERS.TOTALWRITECOMMANDS).increment(1);
                break;
            case READ:
                replayCountersMap.get(REPLAYCOUNTERS.TOTALREADCOMMANDLATENCY).increment(latency);
                replayCountersMap.get(REPLAYCOUNTERS.TOTALREADCOMMANDS).increment(1);
                break;
            default:
                throw new RuntimeException("Unexpected command type: " + replayCommand.getType());
        }
        individualCommandsMap.get(replayCommand + INDIVIDUAL_COMMANDS_LATENCY_SUFFIX).increment(latency);
        individualCommandsMap.get(replayCommand + INDIVIDUAL_COMMANDS_COUNT_SUFFIX).increment(1);
        return true;
    } catch (IOException e) {
        // Replay is best-effort: count the failure and move on.
        LOG.debug("IOException: " + e.getLocalizedMessage());
        individualCommandsMap.get(replayCommand + INDIVIDUAL_COMMANDS_INVALID_SUFFIX).increment(1);
        return false;
    }
}
263120.6435113hadoop
 /**
  * Parses the command line held in {@code argv_} into this job's
  * configuration fields. On a parse failure it logs the error and calls
  * {@code exitUsage}. Several legacy options (-file, -dfs, -cacheArchive,
  * -cacheFile, -jobconf) are still accepted but emit deprecation warnings.
  */
 void parseArgv() {
    CommandLine cmdLine = null;
    try {
        cmdLine = parser.parse(allOptions, argv_);
    } catch (Exception oe) {
        LOG.error(oe.getMessage());
        exitUsage(argv_.length > 0 && "-info".equals(argv_[0]));
    }
    // cmdLine stays null if exitUsage did not terminate the process.
    if (cmdLine != null) {
        @SuppressWarnings("unchecked")
        List<String> args = cmdLine.getArgList();
        if (args != null && args.size() > 0) {
            fail("Found " + args.size() + " unexpected arguments on the " + "command line " + args);
        }
        detailedUsage_ = cmdLine.hasOption("info");
        if (cmdLine.hasOption("help") || detailedUsage_) {
            // Only record the request; the caller prints the usage text.
            printUsage = true;
            return;
        }
        verbose_ = cmdLine.hasOption("verbose");
        background_ = cmdLine.hasOption("background");
        // -debug is cumulative: each occurrence-check bumps the level once.
        debug_ = cmdLine.hasOption("debug") ? debug_ + 1 : debug_;
        String[] values = cmdLine.getOptionValues("input");
        if (values != null && values.length > 0) {
            for (String input : values) {
                inputSpecs_.add(input);
            }
        }
        output_ = cmdLine.getOptionValue("output");
        mapCmd_ = cmdLine.getOptionValue("mapper");
        comCmd_ = cmdLine.getOptionValue("combiner");
        redCmd_ = cmdLine.getOptionValue("reducer");
        lazyOutput_ = cmdLine.hasOption("lazyOutput");
        values = cmdLine.getOptionValues("file");
        if (values != null && values.length > 0) {
            LOG.warn("-file option is deprecated, please use generic option" + " -files instead.");
            // StringBuilder: this is a thread-confined local, so the
            // synchronized StringBuffer buys nothing.
            StringBuilder fileList = new StringBuilder();
            for (String file : values) {
                packageFiles_.add(file);
                try {
                    Path path = new Path(file);
                    FileSystem localFs = FileSystem.getLocal(config_);
                    // Qualify against the local FS so relative paths resolve
                    // from the working directory.
                    Path qualifiedPath = path.makeQualified(localFs.getUri(), localFs.getWorkingDirectory());
                    validate(qualifiedPath);
                    String finalPath = qualifiedPath.toString();
                    if (fileList.length() > 0) {
                        fileList.append(',');
                    }
                    fileList.append(finalPath);
                } catch (Exception e) {
                    throw new IllegalArgumentException(e);
                }
            }
            // Append to any tmpfiles already configured via generic options.
            String tmpFiles = config_.get("tmpfiles", "");
            if (tmpFiles.isEmpty()) {
                tmpFiles = fileList.toString();
            } else {
                tmpFiles = tmpFiles + "," + fileList;
            }
            config_.set("tmpfiles", tmpFiles);
        }
        String fsName = cmdLine.getOptionValue("dfs");
        if (null != fsName) {
            LOG.warn("-dfs option is deprecated, please use -fs instead.");
            config_.set("fs.default.name", fsName);
        }
        additionalConfSpec_ = cmdLine.getOptionValue("additionalconfspec");
        inputFormatSpec_ = cmdLine.getOptionValue("inputformat");
        outputFormatSpec_ = cmdLine.getOptionValue("outputformat");
        numReduceTasksSpec_ = cmdLine.getOptionValue("numReduceTasks");
        partitionerSpec_ = cmdLine.getOptionValue("partitioner");
        inReaderSpec_ = cmdLine.getOptionValue("inputreader");
        mapDebugSpec_ = cmdLine.getOptionValue("mapdebug");
        reduceDebugSpec_ = cmdLine.getOptionValue("reducedebug");
        ioSpec_ = cmdLine.getOptionValue("io");
        String[] car = cmdLine.getOptionValues("cacheArchive");
        if (null != car && car.length > 0) {
            LOG.warn("-cacheArchive option is deprecated, please use -archives instead.");
            for (String s : car) {
                cacheArchives = (cacheArchives == null) ? s : cacheArchives + "," + s;
            }
        }
        String[] caf = cmdLine.getOptionValues("cacheFile");
        if (null != caf && caf.length > 0) {
            LOG.warn("-cacheFile option is deprecated, please use -files instead.");
            for (String s : caf) {
                cacheFiles = (cacheFiles == null) ? s : cacheFiles + "," + s;
            }
        }
        String[] jobconf = cmdLine.getOptionValues("jobconf");
        if (null != jobconf && jobconf.length > 0) {
            LOG.warn("-jobconf option is deprecated, please use -D instead.");
            for (String s : jobconf) {
                String[] parts = s.split("=", 2);
                // FIX: a -jobconf value without '=' used to crash with an
                // ArrayIndexOutOfBoundsException on parts[1]; report it.
                if (parts.length != 2) {
                    fail("-jobconf " + s + " is not in key=value format");
                }
                config_.set(parts[0], parts[1]);
            }
        }
        String[] cmd = cmdLine.getOptionValues("cmdenv");
        if (null != cmd && cmd.length > 0) {
            // Environment entries are accumulated space-separated.
            for (String s : cmd) {
                if (addTaskEnvironment_.length() > 0) {
                    addTaskEnvironment_ += " ";
                }
                addTaskEnvironment_ += s;
            }
        }
    } else {
        exitUsage(argv_.length > 0 && "-info".equals(argv_[0]));
    }
}
262698.3629118hadoop
/**
 * Validates a service specification and resolves it into its final form:
 * checks user/service/component naming constraints (stricter when registry
 * DNS is enabled), expands components whose artifact is of type SERVICE by
 * pulling in the external service's components, propagates global
 * configuration/artifact/resource down to components, verifies dependencies,
 * sorts components by dependency order, and applies the default lifetime.
 *
 * @throws IOException on filesystem errors while loading external components.
 * @throws IllegalArgumentException on any validation failure.
 */
public static void validateAndResolveService(Service service, SliderFileSystem fs, org.apache.hadoop.conf.Configuration conf) throws IOException {
    boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED, RegistryConstants.DEFAULT_DNS_ENABLED);
    if (dnsEnabled) {
        // DNS labels cap the user-name length.
        if (RegistryUtils.currentUser().length() > RegistryConstants.MAX_FQDN_LABEL_LENGTH) {
            throw new IllegalArgumentException(RestApiErrorMessages.ERROR_USER_NAME_INVALID);
        }
        userNamePattern.validate(RegistryUtils.currentUser());
    }
    if (StringUtils.isEmpty(service.getName())) {
        throw new IllegalArgumentException(RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID);
    }
    if (StringUtils.isEmpty(service.getVersion())) {
        throw new IllegalArgumentException(String.format(RestApiErrorMessages.ERROR_APPLICATION_VERSION_INVALID, service.getName()));
    }
    validateNameFormat(service.getName(), conf);
    if (!hasComponent(service)) {
        throw new IllegalArgumentException("No component specified for " + service.getName());
    }
    if (UserGroupInformation.isSecurityEnabled()) {
        validateKerberosPrincipal(service.getKerberosPrincipal());
    }
    try {
        validateDockerClientConfiguration(service, conf);
    } catch (IOException e) {
        throw new IllegalArgumentException(e);
    }
    Configuration globalConf = service.getConfiguration();
    Set<String> componentNames = new HashSet<>();
    List<Component> componentsToRemove = new ArrayList<>();
    List<Component> componentsToAdd = new ArrayList<>();
    // Hoisted out of the loop (it was recomputed per component though it is
    // loop-invariant): the component-name budget is the FQDN label limit
    // minus the width of the widest long — presumably reserved for a numeric
    // instance suffix in the DNS name; confirm against the DNS registry code.
    final int maxCompLength = RegistryConstants.MAX_FQDN_LABEL_LENGTH - Long.toString(Long.MAX_VALUE).length();
    for (Component comp : service.getComponents()) {
        if (dnsEnabled && comp.getName().length() > maxCompLength) {
            throw new IllegalArgumentException(String.format(RestApiErrorMessages.ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName()));
        }
        if (service.getName().equals(comp.getName())) {
            throw new IllegalArgumentException(String.format(RestApiErrorMessages.ERROR_COMPONENT_NAME_CONFLICTS_WITH_SERVICE_NAME, comp.getName(), service.getName()));
        }
        if (componentNames.contains(comp.getName())) {
            throw new IllegalArgumentException("Component name collision: " + comp.getName());
        }
        if (comp.getArtifact() != null && comp.getArtifact().getType() == Artifact.TypeEnum.SERVICE) {
            // A SERVICE-typed artifact is a reference to another service:
            // replace this placeholder with that service's components.
            if (StringUtils.isEmpty(comp.getArtifact().getId())) {
                throw new IllegalArgumentException(RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
            }
            LOG.info("Marking {} for removal", comp.getName());
            componentsToRemove.add(comp);
            List<Component> externalComponents = getComponents(fs, comp.getArtifact().getId());
            for (Component c : externalComponents) {
                Component override = service.getComponent(c.getName());
                if (override != null && override.getArtifact() == null) {
                    // A same-named local component without an artifact acts
                    // as an override: merge the external definition into it.
                    override.mergeFrom(c);
                    LOG.info("Merging external component {} from external {}", c.getName(), comp.getName());
                } else {
                    if (componentNames.contains(c.getName())) {
                        throw new IllegalArgumentException("Component name collision: " + c.getName());
                    }
                    componentNames.add(c.getName());
                    componentsToAdd.add(c);
                    LOG.info("Adding component {} from external {}", c.getName(), comp.getName());
                }
            }
        } else {
            componentNames.add(comp.getName());
            comp.getConfiguration().mergeFrom(globalConf);
        }
    }
    service.getComponents().removeAll(componentsToRemove);
    service.getComponents().addAll(componentsToAdd);
    // Propagate service-level artifact/resource to components lacking them.
    Artifact globalArtifact = service.getArtifact();
    Resource globalResource = service.getResource();
    for (Component comp : service.getComponents()) {
        if (comp.getArtifact() == null && service.getArtifact() != null && service.getArtifact().getType() != Artifact.TypeEnum.SERVICE) {
            comp.setArtifact(globalArtifact);
        }
        if (comp.getResource() == null) {
            comp.setResource(globalResource);
        }
        if (comp.getDependencies() != null) {
            for (String dependency : comp.getDependencies()) {
                if (!componentNames.contains(dependency)) {
                    throw new IllegalArgumentException(String.format(RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, dependency, comp.getName()));
                }
            }
        }
        validateComponent(comp, fs.getFileSystem(), conf);
    }
    validatePlacementPolicy(service.getComponents(), componentNames);
    sortByDependencies(service.getComponents());
    if (service.getLifetime() == null) {
        service.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME);
    }
}
262439.1138109hadoop
/**
 * Entry point for the RM admin CLI: dispatches the first argument to the
 * matching admin operation and returns the process exit code (0 on success,
 * -1 on bad usage or failure).
 */
public int run(String[] args) throws Exception {
    // Resolve the effective configuration; getConf() may legitimately be null.
    YarnConfiguration yarnConf = getConf() == null ? new YarnConfiguration() : new YarnConfiguration(getConf());
    boolean isHAEnabled = yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED, YarnConfiguration.DEFAULT_RM_HA_ENABLED);
    if (args.length < 1) {
        printUsage("", isHAEnabled);
        return -1;
    }
    int argIdx = 0;
    String cmd = args[argIdx++];
    int exitCode = 0;
    if ("-help".equals(cmd)) {
        // "-help <cmd>" prints that command's usage; bare "-help" prints all.
        if (argIdx < args.length) {
            printUsage(args[argIdx], isHAEnabled);
        } else {
            printHelp("", isHAEnabled);
        }
        return exitCode;
    }
    // Commands in USAGE are HA-only and handled by the parent CLI.
    if (USAGE.containsKey(cmd)) {
        if (isHAEnabled) {
            return super.run(args);
        }
        System.out.println("Cannot run " + cmd + " when ResourceManager HA is not enabled");
        return -1;
    }
    String subClusterId = StringUtils.EMPTY;
    if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) || "-refreshNodesResources".equals(cmd) || "-refreshServiceAcl".equals(cmd) || "-refreshUserToGroupsMappings".equals(cmd) || "-refreshSuperUserGroupsConfiguration".equals(cmd) || "-refreshClusterMaxPriority".equals(cmd)) {
        subClusterId = parseSubClusterId(args, isHAEnabled);
        // Federation mode allows an optional sub-cluster pair (3 args total);
        // otherwise these refresh commands take no extra arguments.
        if (isYarnFederationEnabled(getConf()) && args.length != 1 && args.length != 3) {
            printUsage(cmd, isHAEnabled);
            return exitCode;
        } else if (!isYarnFederationEnabled(getConf()) && args.length != 1) {
            printUsage(cmd, isHAEnabled);
            return exitCode;
        }
    }
    if (isYarnFederationEnabled(getConf())) {
        System.out.println("Using YARN Federation mode.");
    }
    try {
        switch (cmd) {
            case "-refreshQueues":
                exitCode = refreshQueues(subClusterId);
                break;
            case "-refreshNodes":
                exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
                break;
            case "-refreshNodesResources":
                exitCode = refreshNodesResources(subClusterId);
                break;
            case "-refreshUserToGroupsMappings":
                exitCode = refreshUserToGroupsMappings(subClusterId);
                break;
            case "-refreshSuperUserGroupsConfiguration":
                exitCode = refreshSuperUserGroupsConfiguration(subClusterId);
                break;
            case "-refreshAdminAcls":
                exitCode = refreshAdminAcls(subClusterId);
                break;
            case "-refreshServiceAcl":
                exitCode = refreshServiceAcls(subClusterId);
                break;
            case "-refreshClusterMaxPriority":
                exitCode = refreshClusterMaxPriority(subClusterId);
                break;
            case "-getGroups":
                // Everything after the command is a user name.
                String[] usernames = Arrays.copyOfRange(args, argIdx, args.length);
                exitCode = getGroups(usernames);
                break;
            case "-updateNodeResource":
                exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled, subClusterId);
                break;
            case "-addToClusterNodeLabels":
                exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
                break;
            case "-removeFromClusterNodeLabels":
                exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
                break;
            case "-replaceLabelsOnNode":
                exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
                break;
            default:
                exitCode = -1;
                System.err.println(cmd.substring(1) + ": Unknown command");
                printUsage("", isHAEnabled);
                break;
        }
    } catch (IllegalArgumentException arge) {
        exitCode = -1;
        System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
        printUsage(cmd, isHAEnabled);
    } catch (RemoteException e) {
        // Remote messages may span many lines; surface only the first.
        exitCode = -1;
        try {
            String[] content = e.getLocalizedMessage().split("\n");
            System.err.println(cmd.substring(1) + ": " + content[0]);
        } catch (Exception ex) {
            System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage());
        }
    } catch (Exception e) {
        exitCode = -1;
        System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage());
    }
    if (null != localNodeLabelsManager) {
        localNodeLabelsManager.stop();
    }
    return exitCode;
}
263757.8113140hadoop
public void testMixedAllocationAndRelease() throws YarnException, IOException {
    // Verifies a mixed GUARANTEED/OPPORTUNISTIC request lifecycle:
    // add -> partially remove -> allocate -> release -> remove remainder,
    // checking the client's ask/release bookkeeping at every step.
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    // Four GUARANTEED requests with node/rack locality at 'priority'...
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    // ...plus two OPPORTUNISTIC requests (no locality) at 'priority2'.
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    int containersRequestedNode = amClient.getTable(0).get(priority, node, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    int containersRequestedRack = amClient.getTable(0).get(priority, rack, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    int containersRequestedAny = amClient.getTable(0).get(priority, ResourceRequest.ANY, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    int oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest.getNumContainers();
    assertEquals(4, containersRequestedNode);
    assertEquals(4, containersRequestedRack);
    assertEquals(4, containersRequestedAny);
    assertEquals(2, oppContainersRequestedAny);
    // 4 ask entries: node/rack/ANY for the guaranteed requests plus ANY for
    // the opportunistic ones.
    assertEquals(4, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    // Cancel two guaranteed requests and one opportunistic request before
    // talking to the RM.
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    containersRequestedNode = amClient.getTable(0).get(priority, node, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    containersRequestedRack = amClient.getTable(0).get(priority, rack, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    containersRequestedAny = amClient.getTable(0).get(priority, ResourceRequest.ANY, ExecutionType.GUARANTEED, capability).remoteRequest.getNumContainers();
    oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest.getNumContainers();
    assertEquals(2, containersRequestedNode);
    assertEquals(2, containersRequestedRack);
    assertEquals(2, containersRequestedAny);
    assertEquals(1, oppContainersRequestedAny);
    assertEquals(4, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    // Heartbeat until the remaining 2 guaranteed + 1 opportunistic containers
    // have all been allocated (bounded by iterationsLeft).
    int allocatedContainerCount = 0;
    int allocatedOpportContainerCount = 0;
    int iterationsLeft = 50;
    Set<ContainerId> releases = new TreeSet<>();
    amClient.getNMTokenCache().clearCache();
    Assert.assertEquals(0, amClient.getNMTokenCache().numberOfTokensInCache());
    HashMap<String, Token> receivedNMTokens = new HashMap<>();
    while (allocatedContainerCount < containersRequestedAny + oppContainersRequestedAny && iterationsLeft-- > 0) {
        AllocateResponse allocResponse = amClient.allocate(0.1f);
        // allocate() sends and then clears the pending ask/release lists.
        assertEquals(0, amClient.ask.size());
        assertEquals(0, amClient.release.size());
        allocatedContainerCount += allocResponse.getAllocatedContainers().size();
        for (Container container : allocResponse.getAllocatedContainers()) {
            if (container.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
                allocatedOpportContainerCount++;
            }
            ContainerId rejectContainerId = container.getId();
            releases.add(rejectContainerId);
        }
        for (NMToken token : allocResponse.getNMTokens()) {
            String nodeID = token.getNodeId().toString();
            receivedNMTokens.put(nodeID, token.getToken());
        }
        if (allocatedContainerCount < containersRequestedAny) {
            sleep(100);
        }
    }
    assertEquals(containersRequestedAny + oppContainersRequestedAny, allocatedContainerCount);
    assertEquals(oppContainersRequestedAny, allocatedOpportContainerCount);
    // Release every allocated container back to the RM.
    for (ContainerId rejectContainerId : releases) {
        amClient.releaseAssignedContainer(rejectContainerId);
    }
    assertEquals(3, amClient.release.size());
    assertEquals(0, amClient.ask.size());
    // Remove the now-satisfied requests; each removal re-adds updated
    // node/rack/ANY entries to 'ask'.
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    // BUGFIX: the opportunistic request was added (and removed earlier in
    // this test) with null nodes/racks, so remove it the same way. The
    // previous code passed nodes/racks here, targeting node/rack table
    // entries that were never registered at priority2.
    amClient.removeContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    assertEquals(4, amClient.ask.size());
    // Drain completion events for the released containers; exit status -100
    // is ContainerExitStatus.ABORTED (container released by the AM).
    iterationsLeft = 3;
    while (iterationsLeft-- > 0) {
        AllocateResponse allocResponse = amClient.allocate(0.1f);
        assertEquals(0, allocResponse.getAllocatedContainers().size());
        if (allocResponse.getCompletedContainersStatuses().size() > 0) {
            for (ContainerStatus cStatus : allocResponse.getCompletedContainersStatuses()) {
                if (releases.contains(cStatus.getContainerId())) {
                    assertThat(cStatus.getState()).isEqualTo(ContainerState.COMPLETE);
                    assertEquals(-100, cStatus.getExitStatus());
                    releases.remove(cStatus.getContainerId());
                }
            }
        }
        if (iterationsLeft > 0) {
            sleep(100);
        }
    }
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
}
26637.7619221hadoop
 void testLogAggregationIndexFileFormat() throws Exception {
    // End-to-end exercise of the "Indexed" log aggregation file format:
    // (1) write + read a container's logs, (2) corrupt the checksum file and
    // verify reads fail closed, (3) append without postWrite (new data stays
    // invisible to readers), (4) append with postWrite, and (5) trigger a
    // rollover so a second aggregated file is created.
    //
    // Start from a clean local log directory for this app.
    if (fs.exists(rootLocalLogDirPath)) {
        fs.delete(rootLocalLogDirPath, true);
    }
    assertTrue(fs.mkdirs(rootLocalLogDirPath));
    Path appLogsDir = new Path(rootLocalLogDirPath, appId.toString());
    if (fs.exists(appLogsDir)) {
        fs.delete(appLogsDir, true);
    }
    assertTrue(fs.mkdirs(appLogsDir));
    // Three normal local log files plus one zero-length file to upload.
    List<String> logTypes = new ArrayList<String>();
    logTypes.add("syslog");
    logTypes.add("stdout");
    logTypes.add("stderr");
    Set<File> files = new HashSet<>();
    LogKey key1 = new LogKey(containerId.toString());
    for (String logType : logTypes) {
        File file = createAndWriteLocalLogFile(containerId, appLogsDir, logType);
        files.add(file);
    }
    files.add(createZeroLocalLogFile(appLogsDir));
    LogValue value = mock(LogValue.class);
    when(value.getPendingLogFilesToUploadForThisContainer()).thenReturn(files);
    // Controller under test: uses a controllable clock and forces a rollover
    // on the third isRollover() check — i.e. only the last write below rolls
    // over to a new aggregated file.
    final ControlledClock clock = new ControlledClock();
    clock.setTime(System.currentTimeMillis());
    LogAggregationIndexedFileController fileFormat = new LogAggregationIndexedFileController() {

        // Counts isRollover() invocations across writes.
        private int rollOverCheck = 0;

        @Override
        public Clock getSystemClock() {
            return clock;
        }

        @Override
        public boolean isRollover(final FileContext fc, final Path candidate) throws IOException {
            rollOverCheck++;
            if (rollOverCheck >= 3) {
                return true;
            }
            return false;
        }
    };
    fileFormat.initialize(getConf(), "Indexed");
    Map<ApplicationAccessType, String> appAcls = new HashMap<>();
    // Clean remote app dir, then resolve the per-node aggregated log path.
    Path appDir = fileFormat.getRemoteAppLogDir(appId, USER_UGI.getShortUserName());
    if (fs.exists(appDir)) {
        fs.delete(appDir, true);
    }
    assertTrue(fs.mkdirs(appDir));
    Path logPath = fileFormat.getRemoteNodeLogFileForApp(appId, USER_UGI.getShortUserName(), nodeId);
    LogAggregationFileControllerContext context = new LogAggregationFileControllerContext(logPath, logPath, true, 1000, appId, appAcls, nodeId, USER_UGI);
    // First full write cycle; postWrite publishes the data to readers.
    fileFormat.initializeWriter(context);
    fileFormat.write(key1, value);
    fileFormat.postWrite(context);
    fileFormat.closeWriter();
    // Read back the metadata: one container entry carrying all 4 files
    // (3 log types + the zero-length file).
    ContainerLogsRequest logRequest = new ContainerLogsRequest();
    logRequest.setAppId(appId);
    logRequest.setNodeId(nodeId.toString());
    logRequest.setAppOwner(USER_UGI.getShortUserName());
    logRequest.setContainerId(containerId.toString());
    logRequest.setBytes(Long.MAX_VALUE);
    List<ContainerLogMeta> meta = fileFormat.readAggregatedLogsMeta(logRequest);
    assertEquals(1, meta.size());
    List<String> fileNames = new ArrayList<>();
    for (ContainerLogMeta log : meta) {
        assertEquals(containerId.toString(), log.getContainerId());
        assertEquals(nodeId.toString(), log.getNodeId());
        assertEquals(4, log.getContainerLogMeta().size());
        for (ContainerLogFileInfo file : log.getContainerLogMeta()) {
            fileNames.add(file.getFileName());
        }
    }
    fileNames.removeAll(logTypes);
    fileNames.remove(ZERO_FILE);
    assertTrue(fileNames.isEmpty());
    // Reading the log content must surface every log message and the
    // zero-length file marker.
    boolean foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
    assertTrue(foundLogs);
    for (String logType : logTypes) {
        assertTrue(sysOutStream.toString().contains(logMessage(containerId, logType)));
    }
    assertZeroFileIsContained(sysOutStream.toString());
    sysOutStream.reset();
    // The same logs must also be readable through a controller obtained from
    // the file-controller factory configured for the "Indexed" format.
    Configuration factoryConf = new Configuration(getConf());
    factoryConf.set("yarn.log-aggregation.file-formats", "Indexed");
    factoryConf.set("yarn.log-aggregation.file-controller.Indexed.class", "org.apache.hadoop.yarn.logaggregation.filecontroller.ifile" + ".LogAggregationIndexedFileController");
    LogAggregationFileControllerFactory factory = new LogAggregationFileControllerFactory(factoryConf);
    LogAggregationFileController fileController = factory.getFileControllerForRead(appId, USER_UGI.getShortUserName());
    assertTrue(fileController instanceof LogAggregationIndexedFileController);
    foundLogs = fileController.readAggregatedLogs(logRequest, System.out);
    assertTrue(foundLogs);
    for (String logType : logTypes) {
        assertTrue(sysOutStream.toString().contains(logMessage(containerId, logType)));
    }
    sysOutStream.reset();
    // Overwrite the checksum file with an entry pointing at offset 0 — this
    // makes all previously written data appear un-committed, so both the
    // meta and content readers must come back empty.
    Path checksumFile = new Path(fileFormat.getRemoteAppLogDir(appId, USER_UGI.getShortUserName()), LogAggregationUtils.getNodeString(nodeId) + LogAggregationIndexedFileController.CHECK_SUM_FILE_SUFFIX);
    FSDataOutputStream fInput = null;
    try {
        String nodeName = logPath.getName() + "_" + clock.getTime();
        fInput = FileSystem.create(fs, checksumFile, LOG_FILE_UMASK);
        fInput.writeInt(nodeName.length());
        fInput.write(nodeName.getBytes(StandardCharsets.UTF_8));
        fInput.writeLong(0);
    } finally {
        IOUtils.closeStream(fInput);
    }
    meta = fileFormat.readAggregatedLogsMeta(logRequest);
    assertTrue(meta.isEmpty());
    foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
    assertFalse(foundLogs);
    sysOutStream.reset();
    // Remove the bogus checksum file so the original data is visible again.
    fs.delete(checksumFile, false);
    assertFalse(fs.exists(checksumFile));
    // Second write: two new log files, but closeWriter() WITHOUT postWrite —
    // readers must still only see the original 4 files.
    List<String> newLogTypes = new ArrayList<>(logTypes);
    files.clear();
    newLogTypes.add("test1");
    files.add(createAndWriteLocalLogFile(containerId, appLogsDir, "test1"));
    newLogTypes.add("test2");
    files.add(createAndWriteLocalLogFile(containerId, appLogsDir, "test2"));
    LogValue value2 = mock(LogValue.class);
    when(value2.getPendingLogFilesToUploadForThisContainer()).thenReturn(files);
    fileFormat.initializeWriter(context);
    fileFormat.write(key1, value2);
    fileFormat.closeWriter();
    meta = fileFormat.readAggregatedLogsMeta(logRequest);
    assertThat(meta.size()).isEqualTo(1);
    for (ContainerLogMeta log : meta) {
        assertEquals(containerId.toString(), log.getContainerId());
        assertEquals(nodeId.toString(), log.getNodeId());
        // Still the original 4 files: the un-postWrite'd data is invisible.
        assertEquals(4, log.getContainerLogMeta().size());
        for (ContainerLogFileInfo file : log.getContainerLogMeta()) {
            fileNames.add(file.getFileName());
        }
    }
    fileNames.removeAll(logTypes);
    fileNames.remove(ZERO_FILE);
    assertTrue(fileNames.isEmpty());
    foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
    assertTrue(foundLogs);
    for (String logType : logTypes) {
        assertTrue(sysOutStream.toString().contains(logMessage(containerId, logType)));
    }
    // The un-committed test1/test2 content must not leak to readers.
    assertFalse(sysOutStream.toString().contains(logMessage(containerId, "test1")));
    assertFalse(sysOutStream.toString().contains(logMessage(containerId, "test2")));
    sysOutStream.reset();
    // Third write WITH postWrite: now a second committed entry appears and
    // test1/test2 become readable.
    fileFormat.initializeWriter(context);
    fileFormat.write(key1, value2);
    fileFormat.postWrite(context);
    fileFormat.closeWriter();
    meta = fileFormat.readAggregatedLogsMeta(logRequest);
    assertThat(meta.size()).isEqualTo(2);
    for (ContainerLogMeta log : meta) {
        assertEquals(containerId.toString(), log.getContainerId());
        assertEquals(nodeId.toString(), log.getNodeId());
        for (ContainerLogFileInfo file : log.getContainerLogMeta()) {
            fileNames.add(file.getFileName());
        }
    }
    fileNames.removeAll(newLogTypes);
    fileNames.remove(ZERO_FILE);
    assertTrue(fileNames.isEmpty());
    foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
    assertTrue(foundLogs);
    for (String logType : newLogTypes) {
        assertTrue(sysOutStream.toString().contains(logMessage(containerId, logType)));
    }
    sysOutStream.reset();
    // Fourth write: isRollover() now returns true (third check), so this
    // write rolls over into a NEW aggregated file — two files on disk and a
    // third committed meta entry.
    clock.setTime(System.currentTimeMillis());
    fileFormat.initializeWriter(context);
    fileFormat.write(key1, value2);
    fileFormat.postWrite(context);
    fileFormat.closeWriter();
    FileStatus[] status = fs.listStatus(logPath.getParent());
    assertEquals(2, status.length);
    meta = fileFormat.readAggregatedLogsMeta(logRequest);
    assertThat(meta.size()).isEqualTo(3);
    for (ContainerLogMeta log : meta) {
        assertEquals(containerId.toString(), log.getContainerId());
        assertEquals(nodeId.toString(), log.getNodeId());
        for (ContainerLogFileInfo file : log.getContainerLogMeta()) {
            fileNames.add(file.getFileName());
        }
    }
    fileNames.removeAll(newLogTypes);
    fileNames.remove(ZERO_FILE);
    assertTrue(fileNames.isEmpty());
    foundLogs = fileFormat.readAggregatedLogs(logRequest, System.out);
    assertTrue(foundLogs);
    for (String logType : newLogTypes) {
        assertTrue(sysOutStream.toString().contains(logMessage(containerId, logType)));
    }
    sysOutStream.reset();
}
262892.325132hadoop
public void allocate(ApplicationAttemptId appAttemptId, AllocateRequest request, AllocateResponse response) throws YarnException {
    // Handles one AM heartbeat: validates the request, forwards it to the
    // scheduler, and fills 'response' with allocations, tokens, headroom and
    // cluster state for the given application attempt.
    handleProgress(appAttemptId, request);
    List<ResourceRequest> ask = request.getAskList();
    List<ContainerId> release = request.getReleaseList();
    ResourceBlacklistRequest blacklistRequest = request.getResourceBlacklistRequest();
    List<String> blacklistAdditions = (blacklistRequest != null) ? blacklistRequest.getBlacklistAdditions() : Collections.emptyList();
    List<String> blacklistRemovals = (blacklistRequest != null) ? blacklistRequest.getBlacklistRemovals() : Collections.emptyList();
    RMApp app = getRmContext().getRMApps().get(appAttemptId.getApplicationId());
    ApplicationSubmissionContext asc = app.getApplicationSubmissionContext();
    // ANY requests without an explicit label inherit the application-level
    // label; partition exclusivity is enforced on ANY requests.
    for (ResourceRequest req : ask) {
        if (null == req.getNodeLabelExpression() && ResourceRequest.ANY.equals(req.getResourceName())) {
            req.setNodeLabelExpression(asc.getNodeLabelExpression());
        }
        if (ResourceRequest.ANY.equals(req.getResourceName())) {
            SchedulerUtils.enforcePartitionExclusivity(req, exclusiveEnforcedPartitions, asc.getNodeLabelExpression());
        }
    }
    Resource maximumCapacity = getScheduler().getMaximumResourceCapability(app.getQueue());
    try {
        RMServerUtils.normalizeAndValidateRequests(ask, maximumCapacity, app.getQueue(), getScheduler(), getRmContext(), nodelabelsEnabled);
    } catch (InvalidResourceRequestException e) {
        RMAppAttempt rmAppAttempt = app.getRMAppAttempt(appAttemptId);
        handleInvalidResourceException(e, rmAppAttempt);
    }
    try {
        RMServerUtils.validateBlacklistRequest(blacklistRequest);
    } catch (InvalidResourceBlacklistRequestException e) {
        LOG.warn("Invalid blacklist request by application " + appAttemptId, e);
        throw e;
    }
    // Releases are only validated when containers do NOT survive across
    // application attempts.
    if (!app.getApplicationSubmissionContext().getKeepContainersAcrossApplicationAttempts()) {
        try {
            RMServerUtils.validateContainerReleaseRequest(release, appAttemptId);
        } catch (InvalidContainerReleaseException e) {
            LOG.warn("Invalid container release by application " + appAttemptId, e);
            throw e;
        }
    }
    List<UpdateContainerError> updateErrors = new ArrayList<>();
    ContainerUpdates containerUpdateRequests = RMServerUtils.validateAndSplitUpdateResourceRequests(getRmContext(), request, maximumCapacity, updateErrors);
    Allocation allocation;
    RMAppAttemptState state = app.getRMAppAttempt(appAttemptId).getAppAttemptState();
    if (state.equals(RMAppAttemptState.FINAL_SAVING) || state.equals(RMAppAttemptState.FINISHING) || app.isAppFinalStateStored()) {
        // Attempt is shutting down; do not hand out new containers.
        LOG.warn(appAttemptId + " is in " + state + " state, ignore container allocate request.");
        allocation = EMPTY_ALLOCATION;
    } else {
        try {
            allocation = getScheduler().allocate(appAttemptId, ask, request.getSchedulingRequests(), release, blacklistAdditions, blacklistRemovals, containerUpdateRequests);
        } catch (SchedulerInvalidResourceRequestException e) {
            LOG.warn("Exceptions caught when scheduler handling requests");
            throw new YarnException(e);
        }
    }
    if (!blacklistAdditions.isEmpty() || !blacklistRemovals.isEmpty()) {
        LOG.info("blacklist are updated in Scheduler." + "blacklistAdditions: " + blacklistAdditions + ", " + "blacklistRemovals: " + blacklistRemovals);
    }
    RMAppAttempt appAttempt = app.getRMAppAttempt(appAttemptId);
    if (allocation.getNMTokens() != null && !allocation.getNMTokens().isEmpty()) {
        response.setNMTokens(allocation.getNMTokens());
    }
    ApplicationMasterServiceUtils.addToUpdateContainerErrors(response, updateErrors);
    handleNodeUpdates(app, response);
    ApplicationMasterServiceUtils.addToAllocatedContainers(response, allocation.getContainers());
    response.setCompletedContainersStatuses(appAttempt.pullJustFinishedContainers());
    response.setAvailableResources(allocation.getResourceLimit());
    // Enhanced headroom gives the AM a rough sense of cluster load.
    QueueMetrics queueMetrics = this.rmContext.getScheduler().getRootQueueMetrics();
    if (queueMetrics != null) {
        int totalVirtualCores = queueMetrics.getAllocatedVirtualCores() + queueMetrics.getAvailableVirtualCores();
        int pendingContainers = queueMetrics.getPendingContainers();
        response.setEnhancedHeadroom(EnhancedHeadroom.newInstance(pendingContainers, totalVirtualCores));
    }
    addToContainerUpdates(response, allocation, ((AbstractYarnScheduler) getScheduler()).getApplicationAttempt(appAttemptId).pullUpdateContainerErrors());
    // Report the active node count for the queue's default node label, or
    // for the whole cluster when the queue is unlabeled.
    String label = "";
    try {
        label = rmContext.getScheduler().getQueueInfo(app.getQueue(), false, false).getDefaultNodeLabelExpression();
    } catch (Exception e) {
        // Best-effort lookup: the queue may not exist (e.g. dynamically
        // created queues). Previously this exception was swallowed silently;
        // log it at debug and fall back to the unlabeled node count below.
        LOG.debug("Failed to get default node label expression for queue " + app.getQueue(), e);
    }
    if (label == null || label.equals("")) {
        response.setNumClusterNodes(getScheduler().getNumClusterNodes());
    } else {
        response.setNumClusterNodes(rmContext.getNodeLabelManager().getActiveNMCountPerLabel(label));
    }
    if (timelineServiceV2Enabled) {
        CollectorInfo collectorInfo = app.getCollectorInfo();
        if (collectorInfo != null) {
            response.setCollectorInfo(collectorInfo);
        }
    }
    response.setPreemptionMessage(generatePreemptionMessage(allocation));
    response.setApplicationPriority(app.getApplicationPriority());
    response.setContainersFromPreviousAttempts(allocation.getPreviousAttemptContainers());
    response.setRejectedSchedulingRequests(allocation.getRejectedRequest());
}
262411.4512159hadoop
 void testEventFiltersParsing() throws Exception {
    // --- Well-formed expressions: parse result must match the expected tree.
    String expression = "abc,def";
    TimelineFilterList expected = new TimelineFilterList(
            new TimelineExistsFilter(TimelineCompareOp.EQUAL, "abc"),
            new TimelineExistsFilter(TimelineCompareOp.EQUAL, "def"));
    verifyFilterList(expression,
            TimelineReaderWebServicesUtils.parseEventFilters(expression), expected);
    // Redundant surrounding brackets do not change the result.
    expression = "(abc,def)";
    verifyFilterList(expression,
            TimelineReaderWebServicesUtils.parseEventFilters(expression), expected);
    // Two bracketed groups joined by OR.
    expression = "(abc,def) OR (rst, uvx)";
    expected = new TimelineFilterList(Operator.OR,
            new TimelineFilterList(
                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "abc"),
                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "def")),
            new TimelineFilterList(
                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "rst"),
                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "uvx")));
    verifyFilterList(expression,
            TimelineReaderWebServicesUtils.parseEventFilters(expression), expected);
    // A '!' prefix negates every member of its group.
    expression = "!(abc,def,uvc) OR (rst, uvx)";
    expected = new TimelineFilterList(Operator.OR,
            new TimelineFilterList(
                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "abc"),
                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "def"),
                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "uvc")),
            new TimelineFilterList(
                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "rst"),
                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "uvx")));
    verifyFilterList(expression,
            TimelineReaderWebServicesUtils.parseEventFilters(expression), expected);
    // Deeply nested AND/OR combination.
    expression = "(((!(abc,def,uvc) OR (rst, uvx)) AND (!(abcdefg) OR !(ghj,tyu)))" + " OR ((bcd,tyu) AND uvb))";
    expected = new TimelineFilterList(Operator.OR,
            new TimelineFilterList(
                    new TimelineFilterList(Operator.OR,
                            new TimelineFilterList(
                                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "abc"),
                                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "def"),
                                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "uvc")),
                            new TimelineFilterList(
                                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "rst"),
                                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "uvx"))),
                    new TimelineFilterList(Operator.OR,
                            new TimelineFilterList(
                                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "abcdefg")),
                            new TimelineFilterList(
                                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "ghj"),
                                    new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "tyu")))),
            new TimelineFilterList(
                    new TimelineFilterList(
                            new TimelineExistsFilter(TimelineCompareOp.EQUAL, "bcd"),
                            new TimelineExistsFilter(TimelineCompareOp.EQUAL, "tyu")),
                    new TimelineExistsFilter(TimelineCompareOp.EQUAL, "uvb")));
    verifyFilterList(expression,
            TimelineReaderWebServicesUtils.parseEventFilters(expression), expected);
    // Arbitrary whitespace must be tolerated and yield the same tree.
    expression = "  (  (  (  !  (  abc , def  ,   uvc)   OR   (   rst  ,   uvx )  )" + "  AND   (  !  (  abcdefg ) OR  !   (  ghj,  tyu)  ) )  OR   (   (" + "   bcd   ,   tyu  )   AND   uvb  )   )";
    verifyFilterList(expression,
            TimelineReaderWebServicesUtils.parseEventFilters(expression), expected);
    // --- Malformed expressions: each must raise TimelineParseException.
    assertParseFails("(((!(abc,def,uvc) OR (rst, uvx)) AND (!(abcdefg) OR !(ghj,tyu)))" + " OR ((bcd,tyu) AND uvb)",
            "Improper brackets. Exception should have been thrown");
    assertParseFails("(((!(abc,def,uvc) (OR (rst, uvx)) AND (!(abcdefg) OR !(ghj,tyu)))" + " OR ((bcd,tyu) AND uvb))",
            "Unexpected opening bracket. Exception should have been thrown");
    assertParseFails("(((!(abc,def,uvc) OR) (rst, uvx)) AND (!(abcdefg) OR !(ghj,tyu)))" + " OR ((bcd,tyu) AND uvb))",
            "Unexpected closing bracket. Exception should have been thrown");
    assertParseFails("(((!(abc,def,uvc) PI (rst, uvx)) AND (!(abcdefg) OR !(ghj,tyu)))" + " OR ((bcd,tyu) AND uvb))",
            "Invalid op. Exception should have been thrown");
    assertParseFails("(((!(abc,def,uvc) !OR (rst, uvx)) AND (!(abcdefg) OR !(ghj,tyu)))" + " OR ((bcd,tyu) AND uvb))",
            "Unexpected ! char. Exception should have been thrown");
    assertParseFails("abc,def,uvc) OR (rst, uvx)",
            "Unexpected closing bracket. Exception should have been thrown");
    assertParseFails("abc,def,uvc OR )rst, uvx)",
            "Unexpected closing bracket. Exception should have been thrown");
    assertParseFails("abc,def,uvc OR ,rst, uvx)",
            "Unexpected delimiter. Exception should have been thrown");
    assertParseFails("abc,def,uvc OR !  ",
            "Unexpected not char. Exception should have been thrown");
    assertParseFails("(abc,def,uvc)) OR (rst, uvx)",
            "Unbalanced brackets. Exception should have been thrown");
    assertParseFails("(((! ,(abc,def,uvc) OR (rst, uvx)) AND (!(abcdefg) OR !(ghj,tyu" + "))) OR ((bcd,tyu) AND uvb))",
            "( should follow ! char. Exception should have been thrown");
    // null and blank expressions parse to null.
    assertNull(TimelineReaderWebServicesUtils.parseEventFilters(null));
    assertNull(TimelineReaderWebServicesUtils.parseEventFilters("   "));
 }

 /**
  * Parses {@code expression} and fails with {@code failureMessage} unless a
  * {@link TimelineParseException} is thrown.
  */
 private static void assertParseFails(String expression, String failureMessage) {
    try {
        TimelineReaderWebServicesUtils.parseEventFilters(expression);
        fail(failureMessage);
    } catch (TimelineParseException e) {
        // Expected: the parser must reject the malformed expression.
    }
 }
263018.7830118hadoop
public void testGetFlowRuns() throws Exception {
    Client client = createClient();
    try {
        // Base REST endpoint for the runs of user1/flow_name on cluster1.
        String runsUrl = "http://localhost:" + getServerPort()
                + "/ws/v2/timeline/clusters/cluster1/users/user1/flows/flow_name/runs";
        // No query parameters: both runs come back, without metrics.
        Set<FlowRunEntity> entities = fetchRuns(client, URI.create(runsUrl));
        assertEquals(2, entities.size());
        for (FlowRunEntity run : entities) {
            assertTrue("Id, run id or start time does not match.",
                    (run.getId().equals("user1@flow_name/1002345678919")
                            && run.getRunId() == 1002345678919L
                            && run.getStartTime() == 1425016501000L)
                    || (run.getId().equals("user1@flow_name/1002345678920")
                            && run.getRunId() == 1002345678920L
                            && run.getStartTime() == 1425016501034L));
            assertEquals(0, run.getMetrics().size());
        }
        // limit=1: only the most recent run (1002345678920) is returned.
        entities = fetchRuns(client, URI.create(runsUrl + "?limit=1"));
        assertEquals(1, entities.size());
        for (FlowRunEntity run : entities) {
            assertTrue("Id, run id or start time does not match.",
                    run.getId().equals("user1@flow_name/1002345678920")
                            && run.getRunId() == 1002345678920L
                            && run.getStartTime() == 1425016501034L);
            assertEquals(0, run.getMetrics().size());
        }
        // createdtimestart after the first run's start filters it out.
        entities = fetchRuns(client,
                URI.create(runsUrl + "?createdtimestart=1425016501030"));
        assertEquals(1, entities.size());
        for (FlowRunEntity run : entities) {
            assertTrue("Id, run id or start time does not match.",
                    run.getId().equals("user1@flow_name/1002345678920")
                            && run.getRunId() == 1002345678920L
                            && run.getStartTime() == 1425016501034L);
            assertEquals(0, run.getMetrics().size());
        }
        // A created-time window covering both runs returns both.
        entities = fetchRuns(client, URI.create(
                runsUrl + "?createdtimestart=1425016500999&createdtimeend=1425016501035"));
        assertEquals(2, entities.size());
        for (FlowRunEntity run : entities) {
            assertTrue("Id, run id or start time does not match.",
                    (run.getId().equals("user1@flow_name/1002345678919")
                            && run.getRunId() == 1002345678919L
                            && run.getStartTime() == 1425016501000L)
                    || (run.getId().equals("user1@flow_name/1002345678920")
                            && run.getRunId() == 1002345678920L
                            && run.getStartTime() == 1425016501034L));
            assertEquals(0, run.getMetrics().size());
        }
        // createdtimeend before the second run's start keeps only the first.
        entities = fetchRuns(client,
                URI.create(runsUrl + "?createdtimeend=1425016501030"));
        assertEquals(1, entities.size());
        for (FlowRunEntity run : entities) {
            assertTrue("Id, run id or start time does not match.",
                    run.getId().equals("user1@flow_name/1002345678919")
                            && run.getRunId() == 1002345678919L
                            && run.getStartTime() == 1425016501000L);
            assertEquals(0, run.getMetrics().size());
        }
        // fields=metrics: both runs returned, now carrying their metrics.
        entities = fetchRuns(client, URI.create(runsUrl + "?fields=metrics"));
        assertEquals(2, entities.size());
        for (FlowRunEntity run : entities) {
            assertTrue("Id, run id or start time does not match.",
                    (run.getId().equals("user1@flow_name/1002345678919")
                            && run.getRunId() == 1002345678919L
                            && run.getStartTime() == 1425016501000L
                            && run.getMetrics().size() == 3)
                    || (run.getId().equals("user1@flow_name/1002345678920")
                            && run.getRunId() == 1002345678920L
                            && run.getStartTime() == 1425016501034L
                            && run.getMetrics().size() == 1));
        }
        // fields=CONFIGS is not valid for flow runs and must be rejected.
        verifyHttpResponse(client, URI.create(runsUrl + "?fields=CONFIGS"),
                Status.BAD_REQUEST);
    } finally {
        client.destroy();
    }
}

/**
 * Issues a GET for {@code uri}, asserts the JSON content type and a
 * non-null body, and returns the decoded set of flow-run entities.
 */
private Set<FlowRunEntity> fetchRuns(Client client, URI uri) throws Exception {
    ClientResponse resp = getResponse(client, uri);
    Set<FlowRunEntity> entities = resp.getEntity(new GenericType<Set<FlowRunEntity>>() {
    });
    assertEquals(MediaType.APPLICATION_JSON_TYPE + "; charset=utf-8", resp.getType().toString());
    assertNotNull(entities);
    return entities;
}
264142.6916135hadoop
/**
 * End-to-end check of the timeline reader's UID-based REST endpoints.
 * Walks the hierarchy flows -> flow runs -> apps -> entities; at each level it
 * collects the UID_KEY value from every returned entity's info map, verifies it
 * matches what {@code TimelineUIDConverter} would encode for that context, and
 * then fetches the next level through the corresponding {@code *-uid/} endpoint.
 * Finally verifies that malformed UIDs yield 400 (BAD_REQUEST).
 */
public void testGetEntitiesByUID() throws Exception {
    Client client = createClient();
    try {
        // Level 1: list all flows and remember each flow's UID.
        URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/flows");
        ClientResponse resp = getResponse(client, uri);
        Set<FlowActivityEntity> flowEntities = resp.getEntity(new GenericType<Set<FlowActivityEntity>>() {
        });
        assertNotNull(flowEntities);
        assertEquals(3, flowEntities.size());
        List<String> listFlowUIDs = new ArrayList<String>();
        for (FlowActivityEntity entity : flowEntities) {
            String flowUID = (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
            listFlowUIDs.add(flowUID);
            // Server-returned UID must equal the converter's encoding of a
            // flow-level context (run/app/entity components all null).
            assertEquals(TimelineUIDConverter.FLOW_UID.encodeUID(new TimelineReaderContext(entity.getCluster(), entity.getUser(), entity.getFlowName(), null, null, null, null)), flowUID);
            // Each known flow carries a fixed number of runs in the fixture data.
            assertTrue((entity.getId().endsWith("@flow_name") && entity.getFlowRuns().size() == 2) || (entity.getId().endsWith("@flow_name2") && entity.getFlowRuns().size() == 1) || (entity.getId().endsWith("@flow1") && entity.getFlowRuns().size() == 3));
        }
        // Level 2: list each flow's runs via flow-uid and collect run UIDs.
        List<String> listFlowRunUIDs = new ArrayList<String>();
        for (String flowUID : listFlowUIDs) {
            uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/flow-uid/" + flowUID + "/runs");
            resp = getResponse(client, uri);
            Set<FlowRunEntity> frEntities = resp.getEntity(new GenericType<Set<FlowRunEntity>>() {
            });
            assertNotNull(frEntities);
            for (FlowRunEntity entity : frEntities) {
                String flowRunUID = (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
                listFlowRunUIDs.add(flowRunUID);
                assertEquals(TimelineUIDConverter.FLOWRUN_UID.encodeUID(new TimelineReaderContext("cluster1", entity.getUser(), entity.getName(), entity.getRunId(), null, null, null)), flowRunUID);
            }
        }
        assertEquals(6, listFlowRunUIDs.size());
        // Each collected run UID must be individually resolvable via run-uid.
        for (String flowRunUID : listFlowRunUIDs) {
            uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/run-uid/" + flowRunUID);
            resp = getResponse(client, uri);
            FlowRunEntity entity = resp.getEntity(FlowRunEntity.class);
            assertNotNull(entity);
        }
        // Level 3: list each run's apps via run-uid and collect app UIDs.
        List<String> listAppUIDs = new ArrayList<String>();
        for (String flowRunUID : listFlowRunUIDs) {
            // Decode the run UID back into a context to predict the app UIDs.
            TimelineReaderContext context = TimelineUIDConverter.FLOWRUN_UID.decodeUID(flowRunUID);
            uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/run-uid/" + flowRunUID + "/apps");
            resp = getResponse(client, uri);
            Set<TimelineEntity> appEntities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
            });
            assertNotNull(appEntities);
            for (TimelineEntity entity : appEntities) {
                String appUID = (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
                listAppUIDs.add(appUID);
                assertEquals(TimelineUIDConverter.APPLICATION_UID.encodeUID(new TimelineReaderContext(context.getClusterId(), context.getUserId(), context.getFlowName(), context.getFlowRunId(), entity.getId(), null, null)), appUID);
            }
        }
        assertEquals(19, listAppUIDs.size());
        // Each app UID must be individually resolvable via app-uid.
        for (String appUID : listAppUIDs) {
            uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/app-uid/" + appUID);
            resp = getResponse(client, uri);
            TimelineEntity entity = resp.getEntity(TimelineEntity.class);
            assertNotNull(entity);
        }
        // Level 4: list "type1" entities of each app and collect entity UIDs.
        List<String> listEntityUIDs = new ArrayList<String>();
        for (String appUID : listAppUIDs) {
            TimelineReaderContext context = TimelineUIDConverter.APPLICATION_UID.decodeUID(appUID);
            uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/app-uid/" + appUID + "/entities/type1");
            resp = getResponse(client, uri);
            Set<TimelineEntity> entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
            });
            assertNotNull(entities);
            for (TimelineEntity entity : entities) {
                String entityUID = (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
                listEntityUIDs.add(entityUID);
                assertEquals(TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(new TimelineReaderContext(context.getClusterId(), context.getUserId(), context.getFlowName(), context.getFlowRunId(), context.getAppId(), "type1", entity.getIdPrefix(), entity.getId())), entityUID);
            }
        }
        assertEquals(2, listEntityUIDs.size());
        // Each entity UID must be individually resolvable via entity-uid.
        for (String entityUID : listEntityUIDs) {
            uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/entity-uid/" + entityUID);
            resp = getResponse(client, uri);
            TimelineEntity entity = resp.getEntity(TimelineEntity.class);
            assertNotNull(entity);
        }
        // Negative cases: malformed UIDs at every level must return 400.
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/flow-uid/dummy:flow/runs");
        verifyHttpResponse(client, uri, Status.BAD_REQUEST);
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/run-uid/dummy:flowrun");
        verifyHttpResponse(client, uri, Status.BAD_REQUEST);
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/run-uid/some:dummy:flow:123v456");
        verifyHttpResponse(client, uri, Status.BAD_REQUEST);
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/run-uid/dummy:flowrun/apps");
        verifyHttpResponse(client, uri, Status.BAD_REQUEST);
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/app-uid/dummy:app");
        verifyHttpResponse(client, uri, Status.BAD_REQUEST);
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/app-uid/dummy:app/entities/type1");
        verifyHttpResponse(client, uri, Status.BAD_REQUEST);
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/entity-uid/dummy:entity");
        verifyHttpResponse(client, uri, Status.BAD_REQUEST);
    } finally {
        // Always release the Jersey client, even on assertion failure.
        client.destroy();
    }
}
263954.9413136hadoop
/**
 * Exercises the isRelatedTo entity filter of the timeline reader against the
 * fixture data: OR'ed relation filters, AND'ed EQUAL/NOT_EQUAL combinations,
 * multi-value relations, non-matching keys/values, and a nested filter list.
 * For each query it checks which entity ids match and, where full fields are
 * retrieved, how many isRelatedTo entries come back in total.
 */
public void testReadEntitiesIsRelatedTo() throws Exception {
    // OR of two relations: an entity matches if it satisfies either one.
    TimelineFilterList orFilter = new TimelineFilterList(Operator.OR);
    orFilter.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1"))));
    orFilter.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4"))));
    Set<TimelineEntity> matched = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().isRelatedTo(orFilter).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(2, matched.size());
    int relatedCount = 0;
    for (TimelineEntity entity : matched) {
        relatedCount += entity.getIsRelatedToEntities().size();
        String id = entity.getId();
        if (!(id.equals("hello") || id.equals("hello1"))) {
            Assert.fail("Entity ids' should have been hello and hello1");
        }
    }
    // Field.ALL was requested, so the relations themselves are populated.
    assertEquals(3, relatedCount);
    // AND of an EQUAL and a NOT_EQUAL relation on the same key.
    TimelineFilterList eqAndNeq = new TimelineFilterList();
    eqAndNeq.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3"))));
    eqAndNeq.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto5"))));
    matched = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().isRelatedTo(eqAndNeq).build(), new TimelineDataToRetrieve());
    assertEquals(1, matched.size());
    relatedCount = 0;
    for (TimelineEntity entity : matched) {
        relatedCount += entity.getIsRelatedToEntities().size();
        if (!entity.getId().equals("hello2")) {
            Assert.fail("Entity id should have been hello2");
        }
    }
    // Default retrieval does not populate relations, so the count stays zero.
    assertEquals(0, relatedCount);
    // Same OR query as above, but with default (minimal) data retrieval.
    TimelineFilterList orFilterMinimal = new TimelineFilterList(Operator.OR);
    orFilterMinimal.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1"))));
    orFilterMinimal.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4"))));
    matched = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().isRelatedTo(orFilterMinimal).build(), new TimelineDataToRetrieve());
    assertEquals(2, matched.size());
    relatedCount = 0;
    for (TimelineEntity entity : matched) {
        relatedCount += entity.getIsRelatedToEntities().size();
        String id = entity.getId();
        if (!(id.equals("hello") || id.equals("hello1"))) {
            Assert.fail("Entity ids' should have been hello and hello1");
        }
    }
    assertEquals(0, relatedCount);
    // Single relation requiring both values to be present for one key.
    TimelineFilterList multiValueEq = new TimelineFilterList();
    multiValueEq.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3", "relatedto5"))));
    matched = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().isRelatedTo(multiValueEq).build(), new TimelineDataToRetrieve());
    assertEquals(1, matched.size());
    relatedCount = 0;
    for (TimelineEntity entity : matched) {
        relatedCount += entity.getIsRelatedToEntities().size();
        if (!entity.getId().equals("hello1")) {
            Assert.fail("Entity id should have been hello1");
        }
    }
    assertEquals(0, relatedCount);
    // AND with a relation key that no entity has: nothing should match.
    TimelineFilterList eqWithDummyKey = new TimelineFilterList();
    eqWithDummyKey.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3"))));
    eqWithDummyKey.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_task", new HashSet<Object>(Arrays.asList("relatedto5"))));
    matched = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().isRelatedTo(eqWithDummyKey).build(), new TimelineDataToRetrieve());
    assertEquals(0, matched.size());
    // Multi-value EQUAL including an unknown value: nothing should match.
    TimelineFilterList missingValueEq = new TimelineFilterList();
    missingValueEq.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task1", new HashSet<Object>(Arrays.asList("relatedto3", "relatedto7"))));
    matched = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().isRelatedTo(missingValueEq).build(), new TimelineDataToRetrieve());
    assertEquals(0, matched.size());
    // Nested OR of two AND lists: only the right branch can be satisfied.
    TimelineFilterList leftBranch = new TimelineFilterList();
    leftBranch.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task", new HashSet<Object>(Arrays.asList("relatedto1"))));
    leftBranch.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "dummy_task", new HashSet<Object>(Arrays.asList("relatedto4"))));
    TimelineFilterList rightBranch = new TimelineFilterList();
    rightBranch.addFilter(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "task2", new HashSet<Object>(Arrays.asList("relatedto4"))));
    TimelineFilterList nestedOr = new TimelineFilterList(Operator.OR, leftBranch, rightBranch);
    matched = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().isRelatedTo(nestedOr).build(), new TimelineDataToRetrieve());
    assertEquals(1, matched.size());
    relatedCount = 0;
    for (TimelineEntity entity : matched) {
        relatedCount += entity.getIsRelatedToEntities().size();
        if (!entity.getId().equals("hello1")) {
            Assert.fail("Entity id should have been hello1");
        }
    }
    assertEquals(0, relatedCount);
}
261295.987781kafka
/**
 * Builds a sample response object for the given API key, used to exercise
 * serialization round-trips for every Kafka protocol response type.
 *
 * @param apiKey  the protocol API whose response should be created
 * @param version the response schema version; forwarded only to the factory
 *                methods whose wire format differs across versions
 * @return a populated {@link AbstractResponse} for the requested API
 * @throws IllegalArgumentException if the API key has no factory here, which
 *         deliberately fails the test when a new API is added without one
 */
private AbstractResponse getResponse(ApiKeys apiKey, short version) {
    switch(apiKey) {
        case PRODUCE:
            return createProduceResponse();
        case FETCH:
            return createFetchResponse(version);
        case LIST_OFFSETS:
            return createListOffsetResponse(version);
        case METADATA:
            return createMetadataResponse();
        case LEADER_AND_ISR:
            return createLeaderAndIsrResponse(version);
        case STOP_REPLICA:
            return createStopReplicaResponse();
        case UPDATE_METADATA:
            return createUpdateMetadataResponse();
        case CONTROLLED_SHUTDOWN:
            return createControlledShutdownResponse();
        case OFFSET_COMMIT:
            return createOffsetCommitResponse();
        case OFFSET_FETCH:
            return createOffsetFetchResponse(version);
        case FIND_COORDINATOR:
            return createFindCoordinatorResponse(version);
        case JOIN_GROUP:
            return createJoinGroupResponse(version);
        case HEARTBEAT:
            return createHeartBeatResponse();
        case LEAVE_GROUP:
            return createLeaveGroupResponse();
        case SYNC_GROUP:
            return createSyncGroupResponse(version);
        case DESCRIBE_GROUPS:
            return createDescribeGroupResponse();
        case LIST_GROUPS:
            return createListGroupsResponse(version);
        case SASL_HANDSHAKE:
            return createSaslHandshakeResponse();
        case API_VERSIONS:
            return createApiVersionResponse();
        case CREATE_TOPICS:
            return createCreateTopicResponse();
        case DELETE_TOPICS:
            return createDeleteTopicsResponse();
        case DELETE_RECORDS:
            return createDeleteRecordsResponse();
        case INIT_PRODUCER_ID:
            return createInitPidResponse();
        case OFFSET_FOR_LEADER_EPOCH:
            return createLeaderEpochResponse();
        case ADD_PARTITIONS_TO_TXN:
            return createAddPartitionsToTxnResponse(version);
        case ADD_OFFSETS_TO_TXN:
            return createAddOffsetsToTxnResponse();
        case END_TXN:
            return createEndTxnResponse();
        case WRITE_TXN_MARKERS:
            return createWriteTxnMarkersResponse();
        case TXN_OFFSET_COMMIT:
            return createTxnOffsetCommitResponse();
        case DESCRIBE_ACLS:
            return createDescribeAclsResponse();
        case CREATE_ACLS:
            return createCreateAclsResponse();
        case DELETE_ACLS:
            return createDeleteAclsResponse(version);
        case DESCRIBE_CONFIGS:
            return createDescribeConfigsResponse(version);
        case ALTER_CONFIGS:
            return createAlterConfigsResponse();
        case ALTER_REPLICA_LOG_DIRS:
            return createAlterReplicaLogDirsResponse();
        case DESCRIBE_LOG_DIRS:
            return createDescribeLogDirsResponse();
        case SASL_AUTHENTICATE:
            return createSaslAuthenticateResponse();
        case CREATE_PARTITIONS:
            return createCreatePartitionsResponse();
        case CREATE_DELEGATION_TOKEN:
            return createCreateTokenResponse();
        case RENEW_DELEGATION_TOKEN:
            return createRenewTokenResponse();
        case EXPIRE_DELEGATION_TOKEN:
            return createExpireTokenResponse();
        case DESCRIBE_DELEGATION_TOKEN:
            return createDescribeTokenResponse(version);
        case DELETE_GROUPS:
            return createDeleteGroupsResponse();
        case ELECT_LEADERS:
            return createElectLeadersResponse();
        case INCREMENTAL_ALTER_CONFIGS:
            return createIncrementalAlterConfigsResponse();
        case ALTER_PARTITION_REASSIGNMENTS:
            return createAlterPartitionReassignmentsResponse();
        case LIST_PARTITION_REASSIGNMENTS:
            return createListPartitionReassignmentsResponse();
        case OFFSET_DELETE:
            return createOffsetDeleteResponse();
        case DESCRIBE_CLIENT_QUOTAS:
            return createDescribeClientQuotasResponse();
        case ALTER_CLIENT_QUOTAS:
            return createAlterClientQuotasResponse();
        case DESCRIBE_USER_SCRAM_CREDENTIALS:
            return createDescribeUserScramCredentialsResponse();
        case ALTER_USER_SCRAM_CREDENTIALS:
            return createAlterUserScramCredentialsResponse();
        case VOTE:
            return createVoteResponse();
        case BEGIN_QUORUM_EPOCH:
            return createBeginQuorumEpochResponse();
        case END_QUORUM_EPOCH:
            return createEndQuorumEpochResponse();
        case DESCRIBE_QUORUM:
            return createDescribeQuorumResponse();
        case ALTER_PARTITION:
            return createAlterPartitionResponse(version);
        case UPDATE_FEATURES:
            return createUpdateFeaturesResponse();
        case ENVELOPE:
            return createEnvelopeResponse();
        case FETCH_SNAPSHOT:
            return createFetchSnapshotResponse();
        case DESCRIBE_CLUSTER:
            return createDescribeClusterResponse();
        case DESCRIBE_PRODUCERS:
            return createDescribeProducersResponse();
        case BROKER_REGISTRATION:
            return createBrokerRegistrationResponse();
        case BROKER_HEARTBEAT:
            return createBrokerHeartbeatResponse();
        case UNREGISTER_BROKER:
            return createUnregisterBrokerResponse();
        case DESCRIBE_TRANSACTIONS:
            return createDescribeTransactionsResponse();
        case LIST_TRANSACTIONS:
            return createListTransactionsResponse();
        case ALLOCATE_PRODUCER_IDS:
            return createAllocateProducerIdsResponse();
        case CONSUMER_GROUP_HEARTBEAT:
            return createConsumerGroupHeartbeatResponse();
        case CONSUMER_GROUP_DESCRIBE:
            return createConsumerGroupDescribeResponse();
        case CONTROLLER_REGISTRATION:
            return createControllerRegistrationResponse();
        case GET_TELEMETRY_SUBSCRIPTIONS:
            return createGetTelemetrySubscriptionsResponse();
        case PUSH_TELEMETRY:
            return createPushTelemetryResponse();
        case ASSIGN_REPLICAS_TO_DIRS:
            return createAssignReplicasToDirsResponse();
        case LIST_CLIENT_METRICS_RESOURCES:
            return createListClientMetricsResourcesResponse();
        case DESCRIBE_TOPIC_PARTITIONS:
            return createDescribeTopicPartitionsResponse();
        default:
            throw new IllegalArgumentException("Unknown API key " + apiKey);
    }
}
264905.971144kafka
/**
 * Verifies that a sink task handles an async offset-commit callback that fires
 * "out of order" — i.e. during a rebalance that happens after the commit was
 * issued. The async commit's callback is captured and deliberately run from
 * inside the rebalance poll; the task must not let the stale callback clobber
 * the offsets established by the rebalance, and a later commit of the
 * post-rebalance offsets must complete normally.
 *
 * Fix over the previous revision: the identical
 * {@code when(sinkTask.preCommit(workerCurrentOffsets))...} stubbing appeared
 * twice; the redundant duplicate (which Mockito treats as a no-op override)
 * has been removed.
 */
public void testCommitWithOutOfOrderCallback() {
    createTask(initialState);
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    verifyInitializeTask();
    // First poll: simply deliver the initial partition assignment.
    Answer<ConsumerRecords<byte[], byte[]>> consumerPollRebalance = invocation -> {
        rebalanceListener.getValue().onPartitionsAssigned(INITIAL_ASSIGNMENT);
        return ConsumerRecords.empty();
    };
    expectTaskGetTopic();
    expectConversionAndTransformation(null, new RecordHeaders());
    // Offsets the worker holds before the rebalance.
    final Map<TopicPartition, OffsetAndMetadata> workerCurrentOffsets = new HashMap<>();
    workerCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    final List<TopicPartition> originalPartitions = new ArrayList<>(INITIAL_ASSIGNMENT);
    final List<TopicPartition> rebalancedPartitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3);
    // Offsets established by the rebalance (TOPIC_PARTITION3 is newly assigned).
    final Map<TopicPartition, OffsetAndMetadata> rebalanceOffsets = new HashMap<>();
    rebalanceOffsets.put(TOPIC_PARTITION, workerCurrentOffsets.get(TOPIC_PARTITION));
    rebalanceOffsets.put(TOPIC_PARTITION2, workerCurrentOffsets.get(TOPIC_PARTITION2));
    rebalanceOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET));
    // Offsets expected after consuming one more record on partitions 1 and 3.
    final Map<TopicPartition, OffsetAndMetadata> postRebalanceCurrentOffsets = new HashMap<>();
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 3));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    postRebalanceCurrentOffsets.put(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 2));
    when(sinkTask.preCommit(workerCurrentOffsets)).thenReturn(workerCurrentOffsets);
    // Capture the async commit's callback instead of completing it inline, so
    // the test can fire it later from inside the rebalance.
    final AtomicReference<Runnable> asyncCallbackRunner = new AtomicReference<>();
    final AtomicBoolean asyncCallbackRan = new AtomicBoolean();
    doAnswer(invocation -> {
        final Map<TopicPartition, OffsetAndMetadata> offsets = invocation.getArgument(0);
        final OffsetCommitCallback callback = invocation.getArgument(1);
        asyncCallbackRunner.set(() -> {
            callback.onComplete(offsets, null);
            asyncCallbackRan.set(true);
        });
        return null;
    }).when(consumer).commitAsync(eq(workerCurrentOffsets), any(OffsetCommitCallback.class));
    final AtomicBoolean rebalanced = new AtomicBoolean();
    // Third poll: revoke, reset offsets, assign the new partition set, then run
    // the deferred async-commit callback (the "out of order" completion), and
    // finally return one record each for partitions 1 and 3.
    Answer<ConsumerRecords<byte[], byte[]>> consumerPollRebalanced = invocation -> {
        rebalanceListener.getValue().onPartitionsRevoked(originalPartitions);
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(TOPIC_PARTITION, rebalanceOffsets.get(TOPIC_PARTITION).offset());
        offsets.put(TOPIC_PARTITION2, rebalanceOffsets.get(TOPIC_PARTITION2).offset());
        offsets.put(TOPIC_PARTITION3, rebalanceOffsets.get(TOPIC_PARTITION3).offset());
        sinkTaskContext.getValue().offset(offsets);
        rebalanceListener.getValue().onPartitionsAssigned(rebalancedPartitions);
        rebalanced.set(true);
        asyncCallbackRunner.get().run();
        long timestamp = RecordBatch.NO_TIMESTAMP;
        TimestampType timestampType = TimestampType.NO_TIMESTAMP_TYPE;
        List<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
        records.add(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + 1, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()));
        records.add(new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET + recordsReturnedTp3 + 1, timestamp, timestampType, 0, 0, RAW_KEY, RAW_VALUE, new RecordHeaders(), Optional.empty()));
        recordsReturnedTp1 += 1;
        recordsReturnedTp3 += 1;
        return new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), records));
    };
    final long offsetTp1 = rebalanceOffsets.get(TOPIC_PARTITION).offset();
    final long offsetTp2 = rebalanceOffsets.get(TOPIC_PARTITION2).offset();
    final long offsetTp3 = rebalanceOffsets.get(TOPIC_PARTITION3).offset();
    when(sinkTask.preCommit(postRebalanceCurrentOffsets)).thenReturn(postRebalanceCurrentOffsets);
    // Assignment is queried several times before and after the rebalance.
    when(consumer.assignment()).thenReturn(INITIAL_ASSIGNMENT).thenReturn(INITIAL_ASSIGNMENT).thenReturn(INITIAL_ASSIGNMENT).thenReturn(INITIAL_ASSIGNMENT).thenReturn(INITIAL_ASSIGNMENT).thenReturn(new HashSet<>(rebalancedPartitions)).thenReturn(new HashSet<>(rebalancedPartitions)).thenReturn(new HashSet<>(rebalancedPartitions)).thenReturn(new HashSet<>(rebalancedPartitions)).thenReturn(new HashSet<>(rebalancedPartitions));
    when(consumer.position(TOPIC_PARTITION)).thenReturn(FIRST_OFFSET).thenReturn(offsetTp1);
    when(consumer.position(TOPIC_PARTITION2)).thenReturn(FIRST_OFFSET).thenReturn(offsetTp2);
    when(consumer.position(TOPIC_PARTITION3)).thenReturn(offsetTp3);
    when(consumer.poll(any(Duration.class))).thenAnswer(consumerPollRebalance).thenAnswer(expectConsumerPoll(1)).thenAnswer(consumerPollRebalanced).thenAnswer(expectConsumerPoll(1));
    // Iteration 1: initial assignment; iteration 2: consume one record;
    // iteration 3: rebalance + out-of-order callback + more records.
    workerTask.iteration();
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    workerTask.iteration();
    sinkTaskContext.getValue().requestCommit();
    workerTask.iteration();
    // Metrics after the rebalance: the stale callback must not corrupt them.
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 3.0);
    assertSinkMetricValue("sink-record-send-total", 3.0);
    assertSinkMetricValue("sink-record-active-count", 4.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.71429);
    assertSinkMetricValue("offset-commit-seq-no", 2.0);
    assertSinkMetricValue("offset-commit-completion-total", 1.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
    assertTrue(asyncCallbackRan.get());
    assertTrue(rebalanced.get());
    // The rebalance offsets, not the stale commit, are the committed state.
    assertEquals(postRebalanceCurrentOffsets, workerTask.currentOffsets());
    assertEquals(rebalanceOffsets, workerTask.lastCommittedOffsets());
    verify(sinkTask).close(new ArrayList<>(workerCurrentOffsets.keySet()));
    verify(consumer).commitSync(anyMap());
    verify(sinkTask).open(rebalancedPartitions);
    verify(consumer).seek(TOPIC_PARTITION, offsetTp1);
    verify(consumer).seek(TOPIC_PARTITION2, offsetTp2);
    verify(consumer).seek(TOPIC_PARTITION3, offsetTp3);
    // Iteration 4: commit the post-rebalance offsets and complete it in order.
    time.sleep(WorkerConfig.OFFSET_COMMIT_TIMEOUT_MS_DEFAULT);
    sinkTaskContext.getValue().requestCommit();
    workerTask.iteration();
    final ArgumentCaptor<OffsetCommitCallback> callback = ArgumentCaptor.forClass(OffsetCommitCallback.class);
    verify(consumer).commitAsync(eq(postRebalanceCurrentOffsets), callback.capture());
    callback.getValue().onComplete(postRebalanceCurrentOffsets, null);
    assertEquals(postRebalanceCurrentOffsets, workerTask.currentOffsets());
    assertEquals(postRebalanceCurrentOffsets, workerTask.lastCommittedOffsets());
    assertSinkMetricValue("partition-count", 3);
    assertSinkMetricValue("sink-record-read-total", 4.0);
    assertSinkMetricValue("sink-record-send-total", 4.0);
    assertSinkMetricValue("sink-record-active-count", 0.0);
    assertSinkMetricValue("sink-record-active-count-max", 4.0);
    assertSinkMetricValue("sink-record-active-count-avg", 0.5555555);
    assertSinkMetricValue("offset-commit-seq-no", 3.0);
    assertSinkMetricValue("offset-commit-completion-total", 2.0);
    assertSinkMetricValue("offset-commit-skip-total", 1.0);
    assertTaskMetricValue("status", "running");
    assertTaskMetricValue("running-ratio", 1.0);
    assertTaskMetricValue("pause-ratio", 0.0);
    assertTaskMetricValue("batch-size-max", 2.0);
    assertTaskMetricValue("batch-size-avg", 1.0);
    assertTaskMetricValue("offset-commit-max-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-avg-time-ms", 0.0);
    assertTaskMetricValue("offset-commit-failure-percentage", 0.0);
    assertTaskMetricValue("offset-commit-success-percentage", 1.0);
}
264836.81159kafka
/**
 * Verifies that when the last member still on the new consumer protocol
 * (member2) hits its session timeout, the consumer group is downgraded to a
 * classic group containing only the remaining classic-protocol member
 * (member1): the timeout must emit tombstones for all consumer-group state
 * followed by a classic group metadata record, fire the expected metric
 * transitions, schedule classic heartbeat/join timers, and leave the classic
 * group in PREPARING_REBALANCE.
 *
 * Fix over the previous revision: the two double-brace map initializations
 * were replaced with plain {@code HashMap} construction — double-brace init
 * creates an anonymous inner class that retains the enclosing test instance.
 */
public void testLastConsumerProtocolMemberSessionTimeoutInConsumerGroup() {
    String groupId = "group-id";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // member1 also supports the classic "range" protocol; member2 does not.
    List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = Collections.singletonList(new ConsumerGroupMemberMetadataValue.ClassicProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1)))))));
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setRebalanceTimeoutMs(45000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(protocols)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setRebalanceTimeoutMs(45000).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build();
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.DOWNGRADE).withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 6).addTopic(barTopicId, barTopicName, 3).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withMember(member1).withMember(member2).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).withAssignmentEpoch(10)).build();
    // Plain map instead of double-brace initialization (see class note above).
    Map<String, TopicMetadata> subscriptionMetadata = new HashMap<>();
    subscriptionMetadata.put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
    subscriptionMetadata.put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
    context.replay(CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
    context.commit();
    // member2 heartbeats once so its (default 45s) session timer is armed.
    context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(10).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setTopicPartitions(Collections.emptyList()));
    context.assertSessionTimeout(groupId, memberId2, 45000);
    // Let the session expire; the fired timeout triggers the downgrade.
    MockCoordinatorTimer.ExpiredTimeout<Void, CoordinatorRecord> timeout = context.sleep(45000 + 1).get(0);
    assertEquals(consumerGroupSessionTimeoutKey(groupId, memberId2), timeout.key);
    byte[] assignment = Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1), new TopicPartition(fooTopicName, 2), new TopicPartition(barTopicName, 0), new TopicPartition(barTopicName, 1)))));
    Map<String, byte[]> assignments = new HashMap<>();
    assignments.put(memberId1, assignment);
    ClassicGroup expectedClassicGroup = new ClassicGroup(new LogContext(), groupId, STABLE, context.time, context.metrics, 10, Optional.ofNullable(ConsumerProtocol.PROTOCOL_TYPE), Optional.ofNullable("range"), Optional.ofNullable(memberId1), Optional.of(context.time.milliseconds()));
    expectedClassicGroup.add(new ClassicGroupMember(memberId1, Optional.ofNullable(member1.instanceId()), member1.clientId(), member1.clientHost(), member1.rebalanceTimeoutMs(), member1.classicProtocolSessionTimeout().get(), ConsumerProtocol.PROTOCOL_TYPE, member1.supportedJoinGroupRequestProtocols(), assignment));
    // Tombstones for all consumer-group state, then the classic group record.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newCurrentAssignmentTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newCurrentAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newTargetAssignmentTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newTargetAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newTargetAssignmentEpochTombstoneRecord(groupId), CoordinatorRecordHelpers.newMemberSubscriptionTombstoneRecord(groupId, memberId1), CoordinatorRecordHelpers.newMemberSubscriptionTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newGroupSubscriptionMetadataTombstoneRecord(groupId), CoordinatorRecordHelpers.newGroupEpochTombstoneRecord(groupId), CoordinatorRecordHelpers.newGroupMetadataRecord(expectedClassicGroup, assignments, MetadataVersion.latestTesting()));
    // Per-member record pairs may appear in either order; the rest is fixed.
    assertUnorderedListEquals(expectedRecords.subList(0, 2), timeout.result.records().subList(0, 2));
    assertUnorderedListEquals(expectedRecords.subList(2, 4), timeout.result.records().subList(2, 4));
    assertRecordEquals(expectedRecords.get(4), timeout.result.records().get(4));
    assertUnorderedListEquals(expectedRecords.subList(5, 7), timeout.result.records().subList(5, 7));
    assertRecordsEquals(expectedRecords.subList(7, 10), timeout.result.records().subList(7, 10));
    verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.STABLE, null);
    verify(context.metrics, times(1)).onClassicGroupStateTransition(null, STABLE);
    // The downgraded group must have classic-side timers scheduled.
    ScheduledTimeout<Void, CoordinatorRecord> heartbeatTimeout = context.timer.timeout(classicGroupHeartbeatKey(groupId, memberId1));
    assertNotNull(heartbeatTimeout);
    ScheduledTimeout<Void, CoordinatorRecord> groupJoinTimeout = context.timer.timeout(classicGroupJoinKey(groupId));
    assertNotNull(groupJoinTimeout);
    ClassicGroup classicGroup = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
    assertTrue(classicGroup.isInState(PREPARING_REBALANCE));
}
264013.133154kafka
public void testRestore() throws Exception {
    final long startTime = SEGMENT_INTERVAL * 2;
    final long increment = SEGMENT_INTERVAL / 2;
    // One record per key, timestamps spaced half a segment apart.
    final String[] values = {"zero", "one", "two", "three", "four", "five", "six", "seven", "eight"};
    for (int key = 0; key < values.length; key++) {
        windowStore.put(key, values[key], startTime + increment * key);
    }
    windowStore.flush();
    windowStore.close();
    // Wipe the local state directory, then rebuild a fresh, empty store instance.
    Utils.delete(baseDir);
    windowStore = buildWindowStore(RETENTION_PERIOD, WINDOW_SIZE, false, Serdes.Integer(), Serdes.String());
    windowStore.init((StateStoreContext) context, windowStore);
    // Before restoring the changelog the store must be completely empty.
    for (int key = 0; key < values.length; key++) {
        assertEquals(
            Collections.emptySet(),
            valuesToSet(windowStore.fetch(key, ofEpochMilli(startTime + increment * key - WINDOW_SIZE), ofEpochMilli(startTime + increment * key + WINDOW_SIZE))));
    }
    // Replay everything the record collector captured as the changelog.
    final List<KeyValue<byte[], byte[]>> changeLog = new ArrayList<>();
    for (final ProducerRecord<Object, Object> producerRecord : recordCollector.collected()) {
        changeLog.add(new KeyValue<>(((Bytes) producerRecord.key()).get(), (byte[]) producerRecord.value()));
    }
    context.restore(STORE_NAME, changeLog);
    // Keys 0-3 stay absent after restore (presumably expired by retention — the original asserts empty).
    for (int key = 0; key <= 3; key++) {
        assertEquals(
            Collections.emptySet(),
            valuesToSet(windowStore.fetch(key, ofEpochMilli(startTime + increment * key - WINDOW_SIZE), ofEpochMilli(startTime + increment * key + WINDOW_SIZE))));
    }
    // Key 4 is store-type dependent: the RocksDB window store drops it, the others keep it.
    final Set<String> expectedForKeyFour = storeType == StoreType.RocksDBWindowStore
        ? Collections.emptySet()
        : Collections.singleton("four");
    assertEquals(
        expectedForKeyFour,
        valuesToSet(windowStore.fetch(4, ofEpochMilli(startTime + increment * 4 - WINDOW_SIZE), ofEpochMilli(startTime + increment * 4 + WINDOW_SIZE))));
    // Keys 5-8 must have been restored from the changelog.
    for (int key = 5; key <= 8; key++) {
        assertEquals(
            Collections.singleton(values[key]),
            valuesToSet(windowStore.fetch(key, ofEpochMilli(startTime + increment * key - WINDOW_SIZE), ofEpochMilli(startTime + increment * key + WINDOW_SIZE))));
    }
    windowStore.flush();
    // Only the last three segment directories should remain on disk.
    assertEquals(Utils.mkSet(segments.segmentName(4L), segments.segmentName(5L), segments.segmentName(6L)), segmentDirs(baseDir));
}
261994.1332130wildfly
/**
 * Parses a single 1.0-schema {@code <connection-definition>} element into an ADD
 * operation ModelNode and registers it in {@code map} under its pool name.
 * Per-definition config properties are collected into {@code configMap}.
 *
 * @param reader    the stream reader positioned on the connection-definition element
 * @param map       receives pool-name -> connection-definition operation
 * @param configMap receives pool-name -> config-property nodes
 * @param isXa      whether an xa-pool (rather than a plain pool) is expected
 */
protected void parseConnectionDefinitions_1_0(final XMLExtendedStreamReader reader, final Map<String, ModelNode> map, final Map<String, HashMap<String, ModelNode>> configMap, final boolean isXa) throws XMLStreamException, ParserException, ValidateException {
    final ModelNode cdModel = new ModelNode();
    cdModel.get(OP).set(ADD);
    String poolName = null;
    String jndiName = null;
    boolean poolDefined = false;

    // Phase 1: attributes on the element itself.
    final int attributeCount = reader.getAttributeCount();
    for (int i = 0; i < attributeCount; i++) {
        final ConnectionDefinition.Attribute attribute = ConnectionDefinition.Attribute.forName(reader.getAttributeLocalName(i));
        final String value = reader.getAttributeValue(i);
        switch (attribute) {
            case ENABLED:
                ENABLED.parseAndSetParameter(value, cdModel, reader);
                break;
            case JNDI_NAME:
                jndiName = value;
                JNDI_NAME.parseAndSetParameter(jndiName, cdModel, reader);
                break;
            case POOL_NAME:
                // Recorded only as the registration key; not written into the model here.
                poolName = value;
                break;
            case USE_JAVA_CONTEXT:
                USE_JAVA_CONTEXT.parseAndSetParameter(value, cdModel, reader);
                break;
            case USE_CCM:
                USE_CCM.parseAndSetParameter(value, cdModel, reader);
                break;
            case SHARABLE:
                SHARABLE.parseAndSetParameter(value, cdModel, reader);
                break;
            case ENLISTMENT:
                ENLISTMENT.parseAndSetParameter(value, cdModel, reader);
                break;
            case CLASS_NAME:
                CLASS_NAME.parseAndSetParameter(value, cdModel, reader);
                break;
            default:
                throw ParseUtils.unexpectedAttribute(reader, i);
        }
    }

    // No explicit pool name: derive one from the last "/" (or ":") segment of the JNDI name.
    if (poolName == null || poolName.trim().isEmpty()) {
        if (jndiName == null || jndiName.trim().isEmpty()) {
            // jndi-name is the only strictly required attribute.
            throw ParseUtils.missingRequired(reader, EnumSet.of(ConnectionDefinition.Attribute.JNDI_NAME));
        }
        if (jndiName.contains("/")) {
            poolName = jndiName.substring(jndiName.lastIndexOf("/") + 1);
        } else {
            poolName = jndiName.substring(jndiName.lastIndexOf(":") + 1);
        }
    }

    // Phase 2: child elements, until the enclosing end tag is reached.
    while (reader.hasNext()) {
        switch (reader.nextTag()) {
            case END_ELEMENT:
                if (Activation.Tag.forName(reader.getLocalName()) == Activation.Tag.CONNECTION_DEFINITION) {
                    // Normal exit: register the finished definition.
                    map.put(poolName, cdModel);
                    return;
                }
                if (ConnectionDefinition.Tag.forName(reader.getLocalName()) == ConnectionDefinition.Tag.UNKNOWN) {
                    throw ParseUtils.unexpectedEndElement(reader);
                }
                break;
            case START_ELEMENT:
                switch (ConnectionDefinition.Tag.forName(reader.getLocalName())) {
                    case CONFIG_PROPERTY:
                        if (!configMap.containsKey(poolName)) {
                            configMap.put(poolName, new HashMap<String, ModelNode>(0));
                        }
                        parseConfigProperties(reader, configMap.get(poolName));
                        break;
                    case SECURITY:
                        parseSecuritySettings(reader, cdModel);
                        break;
                    case TIMEOUT:
                        parseTimeOut(reader, cdModel);
                        break;
                    case VALIDATION:
                        parseValidation(reader, cdModel);
                        break;
                    case XA_POOL:
                        // xa-pool is only legal for XA definitions, and at most once.
                        if (!isXa) {
                            throw ParseUtils.unexpectedElement(reader);
                        }
                        if (poolDefined) {
                            throw new ParserException(bundle.multiplePools());
                        }
                        parseXaPool(reader, cdModel);
                        poolDefined = true;
                        break;
                    case POOL:
                        // pool is only legal for non-XA definitions, and at most once.
                        if (isXa) {
                            throw ParseUtils.unexpectedElement(reader);
                        }
                        if (poolDefined) {
                            throw new ParserException(bundle.multiplePools());
                        }
                        parsePool(reader, cdModel);
                        poolDefined = true;
                        break;
                    case RECOVERY:
                        parseRecovery(reader, cdModel);
                        break;
                    default:
                        throw ParseUtils.unexpectedElement(reader);
                }
                break;
        }
    }
    // Document ended without closing the connection-definition element.
    throw ParseUtils.unexpectedEndElement(reader);
}
261428.7838135wildfly
/**
 * Processes the structure of an EAR deployment: mounts library-directory archives,
 * discovers sub-deployments (either implicitly by scanning, or explicitly from
 * application.xml module entries), and attaches the resulting resource roots.
 *
 * @param phaseContext the current deployment phase context
 * @throws DeploymentUnitProcessingException if a module is missing, mis-typed, or cannot be mounted
 */
public void deploy(final DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    // Only EAR deployments are handled by this processor.
    if (!DeploymentTypeMarker.isType(DeploymentType.EAR, deploymentUnit)) {
        return;
    }
    final ResourceRoot deploymentRoot = phaseContext.getDeploymentUnit().getAttachment(Attachments.DEPLOYMENT_ROOT);
    final VirtualFile virtualFile = deploymentRoot.getRoot();
    // The EAR root itself is neither indexed nor treated as a module root; its children are.
    deploymentRoot.putAttachment(Attachments.INDEX_RESOURCE_ROOT, false);
    ModuleRootMarker.mark(deploymentRoot, false);
    String libDirName = DEFAULT_LIB_DIR;
    final boolean appXmlPresent = deploymentRoot.getRoot().getChild("META-INF/application.xml").exists();
    final EarMetaData earMetaData = deploymentUnit.getAttachment(org.jboss.as.ee.structure.Attachments.EAR_METADATA);
    if (earMetaData != null) {
        // application.xml may override the default library directory; "/" (the root) is rejected.
        final String xmlLibDirName = earMetaData.getLibraryDirectory();
        if (xmlLibDirName != null) {
            if (xmlLibDirName.length() == 1 && xmlLibDirName.charAt(0) == '/') {
                throw EeLogger.ROOT_LOGGER.rootAsLibraryDirectory();
            }
            libDirName = xmlLibDirName;
        }
    }
    // NOTE(review): getAttachment may return null; overlays is dereferenced below without a
    // null check — presumably the attachment is always populated earlier in the chain; verify.
    Map<String, MountedDeploymentOverlay> overlays = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_OVERLAY_LOCATIONS);
    try {
        final VirtualFile libDir;
        if (!libDirName.isEmpty()) {
            // Mount each archive found in the library directory; only .jar entries become module roots.
            libDir = virtualFile.getChild(libDirName);
            if (libDir.exists()) {
                List<VirtualFile> libArchives = libDir.getChildren(CHILD_ARCHIVE_FILTER);
                for (final VirtualFile child : libArchives) {
                    String relativeName = child.getPathNameRelativeTo(deploymentRoot.getRoot());
                    MountedDeploymentOverlay overlay = overlays.get(relativeName);
                    final MountHandle mountHandle;
                    if (overlay != null) {
                        // An overlay replaces the archive; remount it as a zip, nothing new to close.
                        overlay.remountAsZip(false);
                        mountHandle = MountHandle.create(null);
                    } else {
                        // Only plain files need mounting; exploded directories are used as-is.
                        final Closeable closable = child.isFile() ? mount(child, false) : null;
                        mountHandle = MountHandle.create(closable);
                    }
                    final ResourceRoot childResource = new ResourceRoot(child, mountHandle);
                    if (child.getName().toLowerCase(Locale.ENGLISH).endsWith(JAR_EXTENSION)) {
                        ModuleRootMarker.mark(childResource);
                        deploymentUnit.addToAttachmentList(Attachments.RESOURCE_ROOTS, childResource);
                    }
                }
            }
        } else {
            // An empty library-directory value means the EAR has no shared library directory.
            libDir = null;
        }
        // Collect candidate sub-deployment archives, recursing into directories but never
        // into the library directory or into another archive.
        final List<VirtualFile> childArchives = new ArrayList<VirtualFile>(virtualFile.getChildren(new SuffixMatchFilter(CHILD_ARCHIVE_EXTENSIONS, new VisitorAttributes() {

            @Override
            public boolean isLeavesOnly() {
                return false;
            }

            @Override
            public boolean isRecurse(VirtualFile file) {
                if (file.equals(libDir)) {
                    return false;
                }
                for (String suffix : CHILD_ARCHIVE_EXTENSIONS) {
                    if (file.getName().endsWith(suffix)) {
                        return false;
                    }
                }
                return true;
            }
        })));
        if (!appXmlPresent) {
            // No application.xml: every discovered archive is treated as a sub-deployment,
            // with wars/rars considered explodable and wars marked as web archives.
            for (final VirtualFile child : childArchives) {
                final boolean isWarFile = child.getName().toLowerCase(Locale.ENGLISH).endsWith(WAR_EXTENSION);
                final boolean isRarFile = child.getName().toLowerCase(Locale.ENGLISH).endsWith(RAR_EXTENSION);
                this.createResourceRoot(deploymentUnit, child, isWarFile || isRarFile, isWarFile);
            }
        } else {
            // application.xml present: process exactly the declared modules.
            final Set<VirtualFile> subDeploymentFiles = new HashSet<VirtualFile>();
            for (final ModuleMetaData module : earMetaData.getModules()) {
                if (module.getFileName().endsWith(".xml")) {
                    throw EeLogger.ROOT_LOGGER.unsupportedModuleType(module.getFileName());
                }
                final VirtualFile moduleFile = virtualFile.getChild(module.getFileName());
                if (!moduleFile.exists()) {
                    throw EeLogger.ROOT_LOGGER.cannotProcessEarModule(virtualFile, module.getFileName());
                }
                if (libDir != null) {
                    // Declared modules must not live inside the library directory.
                    VirtualFile moduleParentFile = moduleFile.getParent();
                    if (moduleParentFile != null && libDir.equals(moduleParentFile)) {
                        throw EeLogger.ROOT_LOGGER.earModuleChildOfLibraryDirectory(libDirName, module.getFileName());
                    }
                }
                subDeploymentFiles.add(moduleFile);
                final boolean webArchive = module.getType() == ModuleType.Web;
                final ResourceRoot childResource = this.createResourceRoot(deploymentUnit, moduleFile, true, webArchive);
                childResource.putAttachment(org.jboss.as.ee.structure.Attachments.MODULE_META_DATA, module);
                if (!webArchive) {
                    // Web archives are handled by the web subsystem; everything else is a module root.
                    ModuleRootMarker.mark(childResource);
                }
                // An alt-dd entry points the module at an alternate deployment descriptor file.
                final String alternativeDD = module.getAlternativeDD();
                if (alternativeDD != null && alternativeDD.trim().length() > 0) {
                    final VirtualFile alternateDeploymentDescriptor = deploymentRoot.getRoot().getChild(alternativeDD);
                    if (!alternateDeploymentDescriptor.exists()) {
                        throw EeLogger.ROOT_LOGGER.alternateDeploymentDescriptor(alternateDeploymentDescriptor, moduleFile);
                    }
                    switch(module.getType()) {
                        case Client:
                            childResource.putAttachment(org.jboss.as.ee.structure.Attachments.ALTERNATE_CLIENT_DEPLOYMENT_DESCRIPTOR, alternateDeploymentDescriptor);
                            break;
                        case Connector:
                            childResource.putAttachment(org.jboss.as.ee.structure.Attachments.ALTERNATE_CONNECTOR_DEPLOYMENT_DESCRIPTOR, alternateDeploymentDescriptor);
                            break;
                        case Ejb:
                            childResource.putAttachment(org.jboss.as.ee.structure.Attachments.ALTERNATE_EJB_DEPLOYMENT_DESCRIPTOR, alternateDeploymentDescriptor);
                            break;
                        case Web:
                            childResource.putAttachment(org.jboss.as.ee.structure.Attachments.ALTERNATE_WEB_DEPLOYMENT_DESCRIPTOR, alternateDeploymentDescriptor);
                            break;
                        case Service:
                            throw EeLogger.ROOT_LOGGER.unsupportedModuleType(module.getFileName());
                    }
                }
            }
            // Undeclared sar/jar archives found by the scan are still added (as non-module roots).
            for (final VirtualFile child : childArchives) {
                if (subDeploymentFiles.contains(child)) {
                    continue;
                }
                final String fileName = child.getName().toLowerCase(Locale.ENGLISH);
                if (fileName.endsWith(SAR_EXTENSION) || fileName.endsWith(JAR_EXTENSION)) {
                    this.createResourceRoot(deploymentUnit, child, false, false);
                }
            }
        }
    } catch (IOException e) {
        throw EeLogger.ROOT_LOGGER.failedToProcessChild(e, virtualFile);
    }
}
262678.644996wildfly
/**
 * Resolves one runtime attribute of an EJB component and writes it into the
 * operation result. Unknown attribute names (or pool/timer attributes on
 * components without those capabilities) end in an unknown-attribute error.
 */
protected void executeReadAttribute(final String attributeName, final OperationContext context, final T component, final PathAddress address) {
    final boolean poolCapable = componentType.hasPool();
    final ModelNode result = context.getResult();
    final EJBComponentDescription description = component.getComponentDescription();
    if (COMPONENT_CLASS_NAME.getName().equals(attributeName)) {
        result.set(component.getComponentClass().getName());
    } else if (JNDI_NAMES.getName().equals(attributeName)) {
        // All JNDI bindings across every view of the component.
        for (final ViewDescription view : description.getViews()) {
            view.getBindingNames().forEach(result::add);
        }
    } else if (BUSINESS_LOCAL.getName().equals(attributeName)) {
        // Business (non-2.x) local view class names only.
        for (final ViewDescription view : description.getViews()) {
            final EJBViewDescription ejbView = (EJBViewDescription) view;
            if (!ejbView.isEjb2xView() && ejbView.getMethodIntf() == MethodInterfaceType.Local) {
                result.add(ejbView.getViewClassName());
            }
        }
    } else if (BUSINESS_REMOTE.getName().equals(attributeName)) {
        // Business (non-2.x) remote view class names only.
        for (final ViewDescription view : description.getViews()) {
            final EJBViewDescription ejbView = (EJBViewDescription) view;
            if (!ejbView.isEjb2xView() && ejbView.getMethodIntf() == MethodInterfaceType.Remote) {
                result.add(ejbView.getViewClassName());
            }
        }
    } else if (TIMEOUT_METHOD.getName().equals(attributeName)) {
        final Method timeout = component.getTimeoutMethod();
        if (timeout != null) {
            result.set(timeout.toString());
        }
    } else if (ASYNC_METHODS.getName().equals(attributeName)) {
        // Render each asynchronous method as "returnType name(param, param)".
        final SessionBeanComponentDescription sessionDescription = (SessionBeanComponentDescription) description;
        for (final MethodIdentifier method : sessionDescription.getAsynchronousMethods()) {
            result.add(method.getReturnType() + ' ' + method.getName() + '(' + String.join(", ", method.getParameterTypes()) + ')');
        }
    } else if (TRANSACTION_TYPE.getName().equals(attributeName)) {
        final TransactionManagementType txType = component.isBeanManagedTransaction() ? TransactionManagementType.BEAN : TransactionManagementType.CONTAINER;
        result.set(txType.name());
    } else if (SECURITY_DOMAIN.getName().equals(attributeName)) {
        final EJBSecurityMetaData securityMetaData = component.getSecurityMetaData();
        if (securityMetaData != null && securityMetaData.getSecurityDomainName() != null) {
            result.set(securityMetaData.getSecurityDomainName());
        }
    } else if (RUN_AS_ROLE.getName().equals(attributeName)) {
        final EJBSecurityMetaData securityMetaData = component.getSecurityMetaData();
        if (securityMetaData != null && securityMetaData.getRunAs() != null) {
            result.set(securityMetaData.getRunAs());
        }
    } else if (DECLARED_ROLES.getName().equals(attributeName)) {
        final EJBSecurityMetaData securityMetaData = component.getSecurityMetaData();
        if (securityMetaData != null) {
            // Materialize an (empty) list even when no roles are declared.
            result.setEmptyList();
            final Set<String> declaredRoles = securityMetaData.getDeclaredRoles();
            if (declaredRoles != null) {
                declaredRoles.forEach(result::add);
            }
        }
    } else if (componentType.hasTimer() && TIMERS.getName().equals(attributeName)) {
        addTimers(component, result);
    } else if (poolCapable && POOL_AVAILABLE_COUNT.getName().equals(attributeName)) {
        final Pool<?> pool = componentType.getPool(component);
        if (pool != null) {
            result.set(pool.getAvailableCount());
        }
    } else if (poolCapable && POOL_CREATE_COUNT.getName().equals(attributeName)) {
        final Pool<?> pool = componentType.getPool(component);
        if (pool != null) {
            result.set(pool.getCreateCount());
        }
    } else if (poolCapable && POOL_NAME.getName().equals(attributeName)) {
        final String poolName = componentType.pooledComponent(component).getPoolName();
        if (poolName != null) {
            result.set(poolName);
        }
    } else if (poolCapable && POOL_REMOVE_COUNT.getName().equals(attributeName)) {
        final Pool<?> pool = componentType.getPool(component);
        if (pool != null) {
            result.set(pool.getRemoveCount());
        }
    } else if (poolCapable && POOL_CURRENT_SIZE.getName().equals(attributeName)) {
        final Pool<?> pool = componentType.getPool(component);
        if (pool != null) {
            result.set(pool.getCurrentSize());
        }
    } else if (poolCapable && POOL_MAX_SIZE.getName().equals(attributeName)) {
        final Pool<?> pool = componentType.getPool(component);
        if (pool != null) {
            result.set(pool.getMaxSize());
        }
    } else {
        throw EjbLogger.ROOT_LOGGER.unknownAttribute(attributeName);
    }
}
263284.6132108wildfly
/**
 * Dispatches a management runtime operation against the ActiveMQ server control.
 * Read-attribute and get-address-settings-as-json are handled up front (they may
 * run even without an active serverControl); all other operations require the
 * server to be active and are delegated to {@link ActiveMQServerControl}.
 *
 * @param context   the current operation context; results and failures are set on it
 * @param operation the operation model node (must contain the operation name)
 * @throws OperationFailedException if attribute resolution fails
 */
protected void executeRuntimeStep(OperationContext context, ModelNode operation) throws OperationFailedException {
    final String operationName = operation.require(OP).asString();
    final ServiceName serviceName = MessagingServices.getActiveMQServiceName(PathAddress.pathAddress(operation.get(ModelDescriptionConstants.OP_ADDR)));
    if (READ_ATTRIBUTE_OPERATION.equals(operationName) || GET_ADDRESS_SETTINGS_AS_JSON.equals(operationName)) {
        ActiveMQBroker server = null;
        // Only resolve the broker service in NORMAL mode; in admin-only mode it is not started.
        if (context.getRunningMode() == RunningMode.NORMAL) {
            ServiceController<?> service = context.getServiceRegistry(false).getService(serviceName);
            if (service == null || service.getState() != ServiceController.State.UP) {
                throw MessagingLogger.ROOT_LOGGER.activeMQServerNotInstalled(serviceName.getSimpleName());
            }
            server = ActiveMQBroker.class.cast(service.getValue());
        }
        if (READ_ATTRIBUTE_OPERATION.equals(operationName)) {
            handleReadAttribute(context, operation, server);
            return;
        }
        // NOTE(review): if running mode is not NORMAL, server is still null here and the
        // call below would NPE — presumably this path is only reached in NORMAL mode; verify.
        String addressMatch = ADDRESS_MATCH.resolveModelAttribute(context, operation).asString();
        context.getResult().set(server.getAddressSettingsAsJSON(addressMatch));
        return;
    }
    // Remaining operations need an active server; otherwise the operation is rolled back.
    if (rollbackOperationIfServerNotActive(context, operation)) {
        return;
    }
    final ActiveMQServerControl serverControl = getServerControl(context, operation);
    try {
        if (GET_CONNECTORS_AS_JSON.equals(operationName)) {
            String json = serverControl.getConnectorsAsJSON();
            context.getResult().set(json);
        } else if (RESET_ALL_MESSAGE_COUNTERS.equals(operationName)) {
            serverControl.resetAllMessageCounters();
            // getResult() is called for its side effect: it defines an (empty) result node.
            context.getResult();
        } else if (RESET_ALL_MESSAGE_COUNTER_HISTORIES.equals(operationName)) {
            serverControl.resetAllMessageCounterHistories();
            context.getResult();
        } else if (LIST_PREPARED_TRANSACTIONS.equals(operationName)) {
            String[] list = serverControl.listPreparedTransactions();
            reportListOfStrings(context, list);
        } else if (LIST_PREPARED_TRANSACTION_DETAILS_AS_JSON.equals(operationName)) {
            String json = serverControl.listPreparedTransactionDetailsAsJSON();
            context.getResult().set(json);
        } else if (LIST_PREPARED_TRANSACTION_DETAILS_AS_HTML.equals(operationName)) {
            String html = serverControl.listPreparedTransactionDetailsAsHTML();
            context.getResult().set(html);
        } else if (LIST_HEURISTIC_COMMITTED_TRANSACTIONS.equals(operationName)) {
            String[] list = serverControl.listHeuristicCommittedTransactions();
            reportListOfStrings(context, list);
        } else if (LIST_HEURISTIC_ROLLED_BACK_TRANSACTIONS.equals(operationName)) {
            String[] list = serverControl.listHeuristicRolledBackTransactions();
            reportListOfStrings(context, list);
        } else if (COMMIT_PREPARED_TRANSACTION.equals(operationName)) {
            String txId = TRANSACTION_AS_BASE_64.resolveModelAttribute(context, operation).asString();
            boolean committed = serverControl.commitPreparedTransaction(txId);
            context.getResult().set(committed);
        } else if (ROLLBACK_PREPARED_TRANSACTION.equals(operationName)) {
            String txId = TRANSACTION_AS_BASE_64.resolveModelAttribute(context, operation).asString();
            boolean committed = serverControl.rollbackPreparedTransaction(txId);
            context.getResult().set(committed);
        } else if (LIST_REMOTE_ADDRESSES.equals(operationName)) {
            // The IP address filter is optional; without it all remote addresses are listed.
            ModelNode address = OPTIONAL_IP_ADDRESS.resolveModelAttribute(context, operation);
            String[] list = address.isDefined() ? serverControl.listRemoteAddresses(address.asString()) : serverControl.listRemoteAddresses();
            reportListOfStrings(context, list);
        } else if (CLOSE_CONNECTIONS_FOR_ADDRESS.equals(operationName)) {
            String address = REQUIRED_IP_ADDRESS.resolveModelAttribute(context, operation).asString();
            boolean closed = serverControl.closeConnectionsForAddress(address);
            context.getResult().set(closed);
        } else if (CLOSE_CONNECTIONS_FOR_USER.equals(operationName)) {
            String user = USER.resolveModelAttribute(context, operation).asString();
            boolean closed = serverControl.closeConnectionsForUser(user);
            context.getResult().set(closed);
        } else if (CLOSE_CONSUMER_CONNECTIONS_FOR_ADDRESS.equals(operationName)) {
            String address = ADDRESS_MATCH.resolveModelAttribute(context, operation).asString();
            boolean closed = serverControl.closeConsumerConnectionsForAddress(address);
            context.getResult().set(closed);
        } else if (LIST_CONNECTION_IDS.equals(operationName)) {
            String[] list = serverControl.listConnectionIDs();
            reportListOfStrings(context, list);
        } else if (LIST_PRODUCERS_INFO_AS_JSON.equals(operationName)) {
            String json = serverControl.listProducersInfoAsJSON();
            // Rename the broker's field to the name this management API exposes
            // (presumably for backward compatibility with older clients — verify).
            json = json.replace("lastProducedMessageID", "lastUUIDSent");
            context.getResult().set(json);
        } else if (LIST_SESSIONS.equals(operationName)) {
            String connectionID = CONNECTION_ID.resolveModelAttribute(context, operation).asString();
            String[] list = serverControl.listSessions(connectionID);
            reportListOfStrings(context, list);
        } else if (GET_ROLES.equals(operationName)) {
            String addressMatch = ADDRESS_MATCH.resolveModelAttribute(context, operation).asString();
            reportRoles(context, serverControl.getRoles(addressMatch));
        } else if (GET_ROLES_AS_JSON.equals(operationName)) {
            String addressMatch = ADDRESS_MATCH.resolveModelAttribute(context, operation).asString();
            String json = serverControl.getRolesAsJSON(addressMatch);
            reportRolesAsJSON(context, json);
        } else if (GET_ADDRESS_SETTINGS_AS_JSON.equals(operationName)) {
            // Unreachable in practice: this operation is short-circuited at the top of the method.
            String addressMatch = ADDRESS_MATCH.resolveModelAttribute(context, operation).asString();
            String json = serverControl.getAddressSettingsAsJSON(addressMatch);
            context.getResult().set(ManagementUtil.convertAddressSettingInfosAsJSON(json));
        } else if (FORCE_FAILOVER.equals(operationName)) {
            serverControl.forceFailover();
            context.getResult();
        } else {
            throw MessagingLogger.ROOT_LOGGER.unsupportedOperation(operationName);
        }
    } catch (RuntimeException e) {
        // Runtime exceptions propagate to the controller; checked exceptions become failure descriptions.
        throw e;
    } catch (Exception e) {
        context.getFailureDescription().set(e.getLocalizedMessage());
    }
}
264092.9629103wildfly
/**
 * Installs a pooled connection factory service for a deployment-defined JMS
 * connection-factory resource and registers its management model.
 * <p>
 * Builds a management {@code ModelNode} from the resource's properties
 * (connectors, pool sizes, credentials, discovery group, etc.), then installs
 * either an {@link ExternalPooledConnectionFactoryService} (remote broker) or a
 * {@link PooledConnectionFactoryService} (embedded server), wires the JNDI
 * binder dependency into {@code serviceBuilder}, and registers the deployment
 * sub-model resource.
 *
 * @param context        resolution context identifying the app/module/component
 * @param name           JNDI name of the connection-factory resource
 * @param serviceBuilder builder receiving the reference-factory dependency
 * @param serviceTarget  target into which the PCF service is installed
 * @param deploymentUnit owning deployment unit
 * @param injector       receives the {@link ManagedReferenceFactory} for binding
 * @param external       true when the factory points at an external broker
 * @throws DeploymentUnitProcessingException on deployment processing failure
 * @throws OperationFailedException          on management-model failure
 */
private void startedPooledConnectionFactory(ResolutionContext context, String name, ServiceBuilder<?> serviceBuilder, ServiceTarget serviceTarget, DeploymentUnit deploymentUnit, Injector<ManagedReferenceFactory> injector, boolean external) throws DeploymentUnitProcessingException, OperationFailedException {
    // Work on a copy for connector extraction; getConnectors consumes connector
    // entries from the copy so the originals survive for the model loop below.
    Map<String, String> props = new HashMap<>(properties);
    List<String> connectors = getConnectors(props);
    // Mutates the shared 'properties' map, dropping entries this processor
    // does not recognize before they are mirrored into the management model.
    clearUnknownProperties(properties);
    // Management model describing this PCF; registered at the end of the method.
    ModelNode model = new ModelNode();
    for (String connector : connectors) {
        model.get(CONNECTORS).add(connector);
    }
    for (Map.Entry<String, String> entry : properties.entrySet()) {
        model.get(entry.getKey()).set(entry.getValue());
    }
    model.get(MIN_POOL_SIZE.getName()).set(minPoolSize);
    model.get(MAX_POOL_SIZE.getName()).set(maxPoolSize);
    // Optional attributes: only set when present and non-empty.
    if (user != null && !user.isEmpty()) {
        model.get(ConnectionFactoryAttributes.Pooled.USER.getName()).set(user);
    }
    if (password != null && !password.isEmpty()) {
        model.get(ConnectionFactoryAttributes.Pooled.PASSWORD.getName()).set(password);
    }
    if (clientId != null && !clientId.isEmpty()) {
        model.get(CommonAttributes.CLIENT_ID.getName()).set(clientId);
    }
    final String discoveryGroupName = properties.containsKey(DISCOVERY_GROUP.getName()) ? properties.get(DISCOVERY_GROUP.getName()) : null;
    if (discoveryGroupName != null) {
        model.get(DISCOVERY_GROUP.getName()).set(discoveryGroupName);
    }
    final String jgroupsChannelName = properties.containsKey(JGROUPS_CLUSTER.getName()) ? properties.get(JGROUPS_CLUSTER.getName()) : null;
    if (jgroupsChannelName != null) {
        model.get(JGROUPS_CLUSTER.getName()).set(jgroupsChannelName);
    }
    final String managedConnectionPoolClassName = properties.containsKey(MANAGED_CONNECTION_POOL.getName()) ? properties.get(MANAGED_CONNECTION_POOL.getName()) : null;
    if (managedConnectionPoolClassName != null) {
        model.get(MANAGED_CONNECTION_POOL.getName()).set(managedConnectionPoolClassName);
    }
    // null when the property is absent, so the service can apply its own default.
    final Boolean enlistmentTrace = properties.containsKey(ENLISTMENT_TRACE.getName()) ? Boolean.valueOf(properties.get(ENLISTMENT_TRACE.getName())) : null;
    List<PooledConnectionFactoryConfigProperties> adapterParams = getAdapterParams(model);
    String txSupport = transactional ? XA_TX : NO_TX;
    final String serverName;
    final String pcfName = uniqueName(context, name);
    final ContextNames.BindInfo bindInfo = ContextNames.bindInfoForEnvEntry(context.getApplicationName(), context.getModuleName(), context.getComponentName(), !context.isCompUsesModule(), name);
    if (external) {
        // External broker: no local ActiveMQ server is involved.
        // NOTE(review): serverName stays null here and is later passed to
        // EXTERNAL_INSTANCE.registerResource — presumably that handler accepts
        // a null server name for external factories; verify against its impl.
        serverName = null;
        Set<String> connectorsSocketBindings = new HashSet<>();
        Set<String> sslContextNames = new HashSet<>();
        ExternalBrokerConfigurationService configuration = (ExternalBrokerConfigurationService) deploymentUnit.getServiceRegistry().getRequiredService(MessagingSubsystemRootResourceDefinition.CONFIGURATION_CAPABILITY.getCapabilityServiceName()).getService().getValue();
        // Resolve each named connector and collect the socket bindings /
        // SSL contexts its transport parameters reference.
        TransportConfiguration[] tcs = new TransportConfiguration[connectors.size()];
        for (int i = 0; i < tcs.length; i++) {
            tcs[i] = configuration.getConnectors().get(connectors.get(i));
            if (tcs[i].getParams().containsKey(ModelDescriptionConstants.SOCKET_BINDING)) {
                connectorsSocketBindings.add(tcs[i].getParams().get(ModelDescriptionConstants.SOCKET_BINDING).toString());
            }
            if (tcs[i].getParams().containsKey(ModelDescriptionConstants.SSL_CONTEXT)) {
                sslContextNames.add(tcs[i].getParams().get(ModelDescriptionConstants.SSL_CONTEXT).toString());
            }
        }
        DiscoveryGroupConfiguration discoveryGroupConfiguration = null;
        if (discoveryGroupName != null) {
            discoveryGroupConfiguration = configuration.getDiscoveryGroupConfigurations().get(discoveryGroupName);
        }
        // Neither connectors nor a discovery group were configured explicitly:
        // fall back to the connectors of the referenced resource adapter's PCF.
        if (connectors.isEmpty() && discoveryGroupConfiguration == null) {
            tcs = getExternalPooledConnectionFactory(resourceAdapter, deploymentUnit.getServiceRegistry()).getConnectors();
            for (int i = 0; i < tcs.length; i++) {
                if (tcs[i].getParams().containsKey(ModelDescriptionConstants.SOCKET_BINDING)) {
                    connectorsSocketBindings.add(tcs[i].getParams().get(ModelDescriptionConstants.SOCKET_BINDING).toString());
                }
                if (tcs[i].getParams().containsKey(ModelDescriptionConstants.SSL_CONTEXT)) {
                    sslContextNames.add(tcs[i].getParams().get(ModelDescriptionConstants.SSL_CONTEXT).toString());
                }
            }
        }
        ExternalPooledConnectionFactoryService.installService(serviceTarget, configuration, pcfName, tcs, discoveryGroupConfiguration, connectorsSocketBindings, sslContextNames, null, jgroupsChannelName, adapterParams, bindInfo, Collections.emptyList(), txSupport, minPoolSize, maxPoolSize, managedConnectionPoolClassName, enlistmentTrace, deploymentUnit.getAttachment(CAPABILITY_SERVICE_SUPPORT));
    } else {
        // Embedded server: resolve the target ActiveMQ server from the properties.
        serverName = getActiveMQServerName(properties);
        PooledConnectionFactoryService.installService(serviceTarget, pcfName, serverName, connectors, discoveryGroupName, jgroupsChannelName, adapterParams, bindInfo, txSupport, minPoolSize, maxPoolSize, managedConnectionPoolClassName, enlistmentTrace, true);
    }
    // Make the JNDI binder's reference factory a dependency of the caller's service.
    final ServiceName referenceFactoryServiceName = ConnectionFactoryReferenceFactoryService.SERVICE_NAME_BASE.append(bindInfo.getBinderServiceName());
    serviceBuilder.addDependency(referenceFactoryServiceName, ManagedReferenceFactory.class, injector);
    // Register the PCF in the deployment's management resource tree. External
    // factories sit directly under the subsystem; embedded ones under /server=X.
    String managementName = managementName(context, name);
    final DeploymentResourceSupport deploymentResourceSupport = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_RESOURCE_SUPPORT);
    final PathElement pcfPath = PathElement.pathElement(POOLED_CONNECTION_FACTORY, managementName);
    PathAddress registration;
    if (external) {
        deploymentResourceSupport.getDeploymentSubsystemModel(MessagingExtension.SUBSYSTEM_NAME);
        registration = PathAddress.pathAddress(pcfPath);
    } else {
        final PathElement serverElement = PathElement.pathElement(SERVER, serverName);
        deploymentResourceSupport.getDeploymentSubModel(MessagingExtension.SUBSYSTEM_NAME, serverElement);
        registration = PathAddress.pathAddress(serverElement, pcfPath);
    }
    MessagingXmlInstallDeploymentUnitProcessor.createDeploymentSubModel(registration, deploymentUnit);
    if (external) {
        PooledConnectionFactoryConfigurationRuntimeHandler.EXTERNAL_INSTANCE.registerResource(serverName, managementName, model);
    } else {
        PooledConnectionFactoryConfigurationRuntimeHandler.INSTANCE.registerResource(serverName, managementName, model);
    }
}
263165.6130111wildfly
/**
 * Discovers {@link ServletContainerInitializer} (SCI) implementations for a WAR
 * deployment and records them — together with the application classes matching
 * each SCI's {@link HandlesTypes} annotation — in the deployment's
 * {@link ScisMetaData} attachment.
 * <p>
 * SCIs come from two places: module dependencies that import services (via
 * {@link ServiceLoader}), and the WAR's own jars/classes (via {@code loadSci}),
 * honouring the web-fragment ordering.
 *
 * @param phaseContext current deployment phase context
 * @throws DeploymentUnitProcessingException if the module cannot be resolved,
 *         a non-optional dependency fails to load, the annotation index is
 *         missing, or a type named in {@code @HandlesTypes} is absent
 */
public void deploy(final DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    final ModuleSpecification moduleSpecification = deploymentUnit.getAttachment(Attachments.MODULE_SPECIFICATION);
    final ServiceModuleLoader loader = deploymentUnit.getAttachment(Attachments.SERVICE_MODULE_LOADER);
    // Only WAR deployments carry servlet container initializers.
    if (!DeploymentTypeMarker.isType(DeploymentType.WAR, deploymentUnit)) {
        return;
    }
    WarMetaData warMetaData = deploymentUnit.getAttachment(WarMetaData.ATTACHMENT_KEY);
    assert warMetaData != null;
    final Module module = deploymentUnit.getAttachment(Attachments.MODULE);
    if (module == null) {
        throw UndertowLogger.ROOT_LOGGER.failedToResolveModule(deploymentUnit);
    }
    final ClassLoader classLoader = module.getClassLoader();
    // Create (or reuse) the SCI metadata attachment on the deployment unit.
    ScisMetaData scisMetaData = deploymentUnit.getAttachment(ScisMetaData.ATTACHMENT_KEY);
    if (scisMetaData == null) {
        scisMetaData = new ScisMetaData();
        deploymentUnit.putAttachment(ScisMetaData.ATTACHMENT_KEY, scisMetaData);
    }
    Set<ServletContainerInitializer> scis = scisMetaData.getScis();
    // Tracks SCI implementation classes already registered, so the same
    // implementation discovered through several paths is only added once.
    Set<Class<? extends ServletContainerInitializer>> sciClasses = new HashSet<>();
    if (scis == null) {
        // LinkedHashSet keeps discovery order, which determines SCI invocation order.
        scis = new LinkedHashSet<>();
        scisMetaData.setScis(scis);
    }
    Map<ServletContainerInitializer, Set<Class<?>>> handlesTypes = scisMetaData.getHandlesTypes();
    if (handlesTypes == null) {
        handlesTypes = new HashMap<>();
        scisMetaData.setHandlesTypes(handlesTypes);
    }
    // 1) SCIs contributed by module dependencies that import services.
    for (ModuleDependency dependency : moduleSpecification.getAllDependencies()) {
        if (!dependency.isImportServices()) {
            continue;
        }
        try {
            Module depModule = loader.loadModule(dependency.getIdentifier());
            ServiceLoader<ServletContainerInitializer> serviceLoader = depModule.loadService(ServletContainerInitializer.class);
            for (ServletContainerInitializer service : serviceLoader) {
                if (sciClasses.add(service.getClass())) {
                    scis.add(service);
                }
            }
        } catch (ModuleLoadException e) {
            // Optional dependencies may legitimately be absent; anything else is fatal.
            if (!dependency.isOptional()) {
                throw UndertowLogger.ROOT_LOGGER.errorLoadingSCIFromModule(dependency.getIdentifier().toString(), e);
            }
        }
    }
    // 2) SCIs packaged inside the WAR, in web-fragment order, then WEB-INF/classes.
    List<String> order = warMetaData.getOrder();
    Map<String, VirtualFile> localScis = warMetaData.getScis();
    if (order != null && localScis != null) {
        for (String jar : order) {
            VirtualFile sci = localScis.get(jar);
            if (sci != null) {
                scis.addAll(loadSci(classLoader, sci, jar, true, sciClasses));
            }
        }
    }
    if (localScis != null) {
        VirtualFile warDeployedScis = localScis.get("classes");
        if (warDeployedScis != null) {
            scis.addAll(loadSci(classLoader, warDeployedScis, deploymentUnit.getName(), true, sciClasses));
        }
    }
    // Invert @HandlesTypes: map each declared type to the set of SCIs interested in it.
    Map<Class<?>, Set<ServletContainerInitializer>> typesMap = new HashMap<>();
    for (ServletContainerInitializer service : scis) {
        try {
            if (service.getClass().isAnnotationPresent(HandlesTypes.class)) {
                HandlesTypes handlesTypesAnnotation = service.getClass().getAnnotation(HandlesTypes.class);
                Class<?>[] typesArray = handlesTypesAnnotation.value();
                if (typesArray != null) {
                    for (Class<?> type : typesArray) {
                        typesMap.computeIfAbsent(type, t -> new HashSet<>()).add(service);
                        // Seed an empty result set; it is filled in the scan below.
                        handlesTypes.put(service, new HashSet<Class<?>>());
                    }
                }
            }
        } catch (ArrayStoreException e) {
            // Raised when a type named in @HandlesTypes is missing from the class path.
            throw UndertowLogger.ROOT_LOGGER.missingClassInAnnotation(HandlesTypes.class.getSimpleName(), service.getClass().getName());
        }
    }
    Class<?>[] typesArray = typesMap.keySet().toArray(new Class<?>[0]);
    final CompositeIndex index = deploymentUnit.getAttachment(Attachments.COMPOSITE_ANNOTATION_INDEX);
    if (index == null) {
        throw UndertowLogger.ROOT_LOGGER.unableToResolveAnnotationIndex(deploymentUnit);
    }
    // Annotation index of the parent deployment (e.g. the enclosing EAR), if any.
    // The original code computed this twice under two names (parent/parentIndex);
    // they were always the same value, so a single variable now serves both uses.
    final CompositeIndex parent;
    if (deploymentUnit.getParent() != null) {
        parent = deploymentUnit.getParent().getAttachment(Attachments.COMPOSITE_ANNOTATION_INDEX);
    } else {
        parent = null;
    }
    // Scan the indexes for subclasses/implementors/annotated classes of each
    // handled type and hand the loaded classes to every interested SCI.
    for (Class<?> type : typesArray) {
        DotName className = DotName.createSimple(type.getName());
        Set<ClassInfo> classInfos = new HashSet<>();
        classInfos.addAll(processHandlesType(className, type, index, parent));
        if (parent != null) {
            // NOTE(review): the parent index is passed as both the index to scan
            // and the fallback index, mirroring the original behavior where
            // parentIndex == parent.
            classInfos.addAll(processHandlesType(className, type, parent, parent));
        }
        Set<Class<?>> classes = loadClassInfoSet(classInfos, classLoader);
        Set<ServletContainerInitializer> sciSet = typesMap.get(type);
        for (ServletContainerInitializer sci : sciSet) {
            handlesTypes.get(sci).addAll(classes);
        }
    }
}
272105.3732110cassandra
/**
 * Scrubs the data file partition by partition, writing every readable partition
 * through {@code writer} and skipping (while counting) the unreadable ones.
 * <p>
 * The index file, when available, provides a per-partition key and position
 * that are used both to cross-check the data file and to recover: a partition
 * that cannot be read from the data file is retried from the index-derived
 * position before being skipped.
 *
 * @param writer rewriter that receives the surviving partitions
 * @throws IOException on unrecoverable I/O failure
 */
protected void scrubInternal(SSTableRewriter writer) throws IOException {
    try {
        // Prime the index cursor with the first key and verify the first
        // partition starts at position 0 of the data file.
        nextIndexKey = indexAvailable() ? ByteBufferUtil.readWithShortLength(indexFile) : null;
        if (indexAvailable()) {
            long firstRowPositionFromIndex = rowIndexEntrySerializer.deserializePositionAndSkip(indexFile);
            assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
        }
    } catch (Throwable ex) {
        throwIfFatal(ex);
        // Index is unusable: disable it by pointing past the end of both files,
        // so the scrub proceeds on the data file alone.
        nextIndexKey = null;
        nextPartitionPositionFromIndex = dataFile.length();
        if (indexFile != null)
            indexFile.seek(indexFile.length());
    }
    DecoratedKey prevKey = null;
    while (!dataFile.isEOF()) {
        if (scrubInfo.isStopRequested())
            throw new CompactionInterruptedException(scrubInfo.getCompactionInfo());
        long partitionStart = dataFile.getFilePointer();
        outputHandler.debug("Reading row at %d", partitionStart);
        // Attempt to read and validate the partition key from the data file;
        // a null key afterwards means the read failed (handled below).
        DecoratedKey key = null;
        try {
            ByteBuffer raw = ByteBufferUtil.readWithShortLength(dataFile);
            if (!cfs.metadata.getLocal().isIndex())
                cfs.metadata.getLocal().partitionKeyType.validate(raw);
            key = sstable.decorateKey(raw);
        } catch (Throwable th) {
            throwIfFatal(th);
        }
        // Derive the partition's expected start and size from the index entry:
        // start = index position + 2-byte key length + key bytes.
        long dataStartFromIndex = -1;
        long dataSizeFromIndex = -1;
        updateIndexKey();
        if (indexAvailable()) {
            if (currentIndexKey != null) {
                dataStartFromIndex = currentPartitionPositionFromIndex + 2 + currentIndexKey.remaining();
                dataSizeFromIndex = nextPartitionPositionFromIndex - dataStartFromIndex;
            }
        }
        long dataStart = dataFile.getFilePointer();
        String keyName = key == null ? "(unreadable key)" : keyString(key);
        outputHandler.debug("partition %s is %s", keyName, FBUtilities.prettyPrintMemory(dataSizeFromIndex));
        assert currentIndexKey != null || !indexAvailable();
        try {
            // Cross-check data file against index before appending the partition.
            if (key == null)
                throw new IOError(new IOException("Unable to read partition key from data file"));
            if (currentIndexKey != null && !key.getKey().equals(currentIndexKey)) {
                // NOTE(review): the data-file key is rendered as "_too big_"
                // rather than its hex — presumably to avoid dumping huge keys;
                // confirm against upstream before changing the message.
                throw new IOError(new IOException(String.format("Key from data file (%s) does not match key from index file (%s)", "_too big_", ByteBufferUtil.bytesToHex(currentIndexKey))));
            }
            if (indexFile != null && dataSizeFromIndex > dataFile.length())
                throw new IOError(new IOException("Impossible partition size (greater than file length): " + dataSizeFromIndex));
            if (indexFile != null && dataStart != dataStartFromIndex)
                outputHandler.warn("Data file partition position %d differs from index file row position %d", dataStart, dataStartFromIndex);
            if (tryAppend(prevKey, key, writer))
                prevKey = key;
        } catch (Throwable th) {
            throwIfFatal(th);
            outputHandler.warn(th, "Error reading partition %s (stacktrace follows):", keyName);
            // Recovery path: if the index disagrees with what we read, retry the
            // partition using the index's key and position.
            if (currentIndexKey != null && (key == null || !key.getKey().equals(currentIndexKey) || dataStart != dataStartFromIndex)) {
                outputHandler.output("Retrying from partition index; data is %s bytes starting at %s", dataSizeFromIndex, dataStartFromIndex);
                key = sstable.decorateKey(currentIndexKey);
                try {
                    if (!cfs.metadata.getLocal().isIndex())
                        cfs.metadata.getLocal().partitionKeyType.validate(key.getKey());
                    dataFile.seek(dataStartFromIndex);
                    if (tryAppend(prevKey, key, writer))
                        prevKey = key;
                } catch (Throwable th2) {
                    throwIfFatal(th2);
                    throwIfCannotContinue(key, th2);
                    // Retry failed as well: count the partition as bad and move on.
                    outputHandler.warn(th2, "Retry failed too. Skipping to next partition (retry's stacktrace follows)");
                    badPartitions++;
                    if (!seekToNextPartition())
                        break;
                }
            } else {
                // No usable index disagreement to retry from: skip the partition.
                throwIfCannotContinue(key, th);
                outputHandler.warn("Partition starting at position %d is unreadable; skipping to next", dataStart);
                badPartitions++;
                // Without an index key there is no known next position to seek to.
                if (currentIndexKey != null)
                    if (!seekToNextPartition())
                        break;
            }
        }
    }
}
273782.014137cassandra
/**
 * Verifies the semantics of keyspace-level {@code ALL TABLES} permissions:
 * a user granted only CREATE cannot touch existing tables/views/types; after
 * GRANT ALTER, DROP, SELECT, MODIFY the user can operate on both pre-existing
 * and self-created schema objects; after REVOKE the denials return.
 *
 * @throws Throwable on any unexpected query failure
 */
public void testGrantedAllTables() throws Throwable {
    // Phase 1: as superuser, create the role with only CREATE on ALL TABLES,
    // plus a table, index, type and materialized view the role did not create.
    useSuperUser();
    executeNet(String.format("CREATE ROLE %s WITH LOGIN = TRUE AND password='%s'", user, pass));
    executeNet("GRANT CREATE ON ALL TABLES IN KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
    String table = KEYSPACE_PER_TEST + "." + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
    String index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
    String type = KEYSPACE_PER_TEST + "." + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
    String mv = KEYSPACE_PER_TEST + ".alltables_mv_01";
    executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
    useUser(user, pass);
    // Effectively-final copy for use inside the lambda below.
    final String spinAssertTable = table;
    // Spin until permission propagation makes the INSERT denial observable.
    Util.spinAssertEquals(false, () -> {
        try {
            assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
        } catch (Throwable e) {
            return true;
        }
        return false;
    }, 10);
    // Phase 2: with only CREATE granted, every other operation must be denied.
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1"));
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
    assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
    assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "ALTER TYPE " + type + " ADD c bigint");
    assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "DROP TYPE " + type);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP MATERIALIZED VIEW " + mv);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP INDEX " + index);
    // Phase 3: grant the full table-level permission set.
    useSuperUser();
    executeNet("GRANT ALTER, DROP, SELECT, MODIFY ON ALL TABLES IN KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
    useUser(user, pass);
    // ALL TABLES permissions must not leak up to the keyspace itself:
    // spin until the keyspace-level ALTER denial is observable.
    Util.spinAssertEquals(false, () -> {
        try {
            assertUnauthorizedQuery("User user has no ALTER permission on <keyspace " + KEYSPACE_PER_TEST + "> or any of its parents", "ALTER KEYSPACE " + KEYSPACE_PER_TEST + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}");
        } catch (Throwable e) {
            return true;
        }
        return false;
    }, 10);
    // Phase 4: all table/view/index/type operations now succeed on the
    // pre-existing objects.
    executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
    assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
    assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
    executeNet("DROP MATERIALIZED VIEW " + mv);
    executeNet("DROP INDEX " + index);
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
    executeNet("ALTER TYPE " + type + " ADD c bigint");
    executeNet("DROP TYPE " + type);
    // Phase 5: the user creates a fresh table/type/view/index and can fully
    // manage the objects it owns as well.
    table = createTableName();
    type = KEYSPACE_PER_TEST + "." + createTypeName();
    mv = KEYSPACE_PER_TEST + ".alltables_mv_02";
    executeNet("CREATE TYPE " + type + " (a int, b text)");
    executeNet(formatQuery(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))"));
    index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
    executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
    executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
    assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
    assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
    executeNet("DROP MATERIALIZED VIEW " + mv);
    executeNet("DROP INDEX " + index);
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
    executeNet("ALTER TYPE " + type + " ADD c bigint");
    executeNet("DROP TYPE " + type);
    // Phase 6: revoke the permissions and recreate the schema objects as
    // superuser; every operation must be denied again.
    useSuperUser();
    executeNet("REVOKE ALTER, DROP, SELECT, MODIFY ON ALL TABLES IN KEYSPACE " + KEYSPACE_PER_TEST + " FROM " + user);
    table = KEYSPACE_PER_TEST + "." + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
    index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
    type = KEYSPACE_PER_TEST + "." + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
    mv = KEYSPACE_PER_TEST + ".alltables_mv_03";
    executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
    useUser(user, pass);
    // Effectively-final copy for the lambda; spin until the revocation is visible.
    final String spinAssertTable2 = table;
    Util.spinAssertEquals(false, () -> {
        try {
            assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable2 + "> or any of its parents", "INSERT INTO " + spinAssertTable2 + " (pk, ck, val, val_2) VALUES (1, 1, 1, '1')");
        } catch (Throwable e) {
            return true;
        }
        return false;
    }, 10);
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", "UPDATE " + table + " SET val = 1 WHERE pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", "DELETE FROM " + table + " WHERE pk = 1 AND ck = 2");
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", "SELECT * FROM " + table + " WHERE pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", "TRUNCATE TABLE " + table);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE " + table + " ADD val_3 int"));
    assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "DROP TABLE " + table));
    assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "ALTER TYPE " + type + " ADD c bigint");
    assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "DROP TYPE " + type);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP MATERIALIZED VIEW " + mv);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP INDEX " + index);
}
274373.441143cassandra
/**
 * Verifies the slices produced by multi-column slice restrictions on a table
 * whose clustering columns alternate sort direction (ASC, DESC, ASC, DESC).
 * Because the directions alternate, a single tuple comparison such as
 * {@code (a, b, c, d) > (1, 2, 3, 4)} must be decomposed into several
 * clustering slices; each scenario below pins the exact expected bounds.
 */
public void testBoundsAsClusteringWithMultiSliceRestrictionsWithAscendingDescendingColumnMix() {
    TableMetadata tableMetadata = newTableMetadata(Sort.ASC, Sort.DESC, Sort.ASC, Sort.DESC);
    ByteBuffer value1 = ByteBufferUtil.bytes(1);
    ByteBuffer value2 = ByteBufferUtil.bytes(2);
    ByteBuffer value3 = ByteBufferUtil.bytes(3);
    ByteBuffer value4 = ByteBufferUtil.bytes(4);
    // Scenario: (a, b, c, d) > (1, 2, 3, 4) -> 4 slices.
    Restriction restriction = newMultiSlice(tableMetadata, 0, Operator.GT, value1, value2, value3, value4);
    ClusteringColumnRestrictions restrictions = restrictions(tableMetadata, restriction);
    Slices slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(4, slices.size());
    Slice slice = slices.get(0);
    assertStartBound(slice, true, value1);
    assertEndBound(slice, false, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2, value3);
    assertEndBound(slice, false, value1, value2, value3, value4);
    slice = slices.get(2);
    assertStartBound(slice, false, value1, value2, value3);
    assertEndBound(slice, true, value1, value2);
    slice = slices.get(3);
    assertStartBound(slice, false, value1);
    assertEmptyEnd(slice);
    // Scenario: a = 1 AND (b, c, d) > (2, 3, 4) -> 3 slices (EQ pins column 0).
    Restriction eq = newSingleRestriction(tableMetadata, 0, Operator.EQ, value1);
    restriction = newMultiSlice(tableMetadata, 1, Operator.GT, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction, eq);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(3, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1);
    assertEndBound(slice, false, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2, value3);
    assertEndBound(slice, false, value1, value2, value3, value4);
    slice = slices.get(2);
    assertStartBound(slice, false, value1, value2, value3);
    assertEndBound(slice, true, value1, value2);
    // Scenario: (a, b) >= (1, 2) -> 2 slices.
    restriction = newMultiSlice(tableMetadata, 0, Operator.GTE, value1, value2);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(2, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1);
    assertEndBound(slice, true, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, false, value1);
    assertEmptyEnd(slice);
    // Scenario: (a, b, c, d) >= (1, 2, 3, 4) -> 4 slices (inclusive inner bound).
    restriction = newMultiSlice(tableMetadata, 0, Operator.GTE, value1, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(4, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1);
    assertEndBound(slice, false, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2, value3);
    assertEndBound(slice, true, value1, value2, value3, value4);
    slice = slices.get(2);
    assertStartBound(slice, false, value1, value2, value3);
    assertEndBound(slice, true, value1, value2);
    slice = slices.get(3);
    assertStartBound(slice, false, value1);
    assertEmptyEnd(slice);
    // Scenario: (a, b, c, d) <= (1, 2, 3, 4) -> 4 slices.
    restriction = newMultiSlice(tableMetadata, 0, Operator.LTE, value1, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(4, slices.size());
    slice = slices.get(0);
    assertEmptyStart(slice);
    assertEndBound(slice, false, value1);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, false, value1, value2, value3);
    slice = slices.get(2);
    assertStartBound(slice, true, value1, value2, value3, value4);
    assertEndBound(slice, true, value1, value2, value3);
    slice = slices.get(3);
    assertStartBound(slice, false, value1, value2);
    assertEndBound(slice, true, value1);
    // Scenario: (a, b, c, d) < (1, 2, 3, 4) -> 4 slices (exclusive inner bound).
    restriction = newMultiSlice(tableMetadata, 0, Operator.LT, value1, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(4, slices.size());
    slice = slices.get(0);
    assertEmptyStart(slice);
    assertEndBound(slice, false, value1);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, false, value1, value2, value3);
    slice = slices.get(2);
    assertStartBound(slice, false, value1, value2, value3, value4);
    assertEndBound(slice, true, value1, value2, value3);
    slice = slices.get(3);
    assertStartBound(slice, false, value1, value2);
    assertEndBound(slice, true, value1);
    // Scenario: (a, b, c, d) > (1, 2, 3, 4) AND (a, b) < (2, 3) -> 5 slices.
    restriction = newMultiSlice(tableMetadata, 0, Operator.GT, value1, value2, value3, value4);
    Restriction restriction2 = newMultiSlice(tableMetadata, 0, Operator.LT, value2, value3);
    restrictions = restrictions(tableMetadata, restriction, restriction2);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(5, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1);
    assertEndBound(slice, false, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2, value3);
    assertEndBound(slice, false, value1, value2, value3, value4);
    slice = slices.get(2);
    assertStartBound(slice, false, value1, value2, value3);
    assertEndBound(slice, true, value1, value2);
    slice = slices.get(3);
    assertStartBound(slice, false, value1);
    assertEndBound(slice, false, value2);
    slice = slices.get(4);
    assertStartBound(slice, false, value2, value3);
    assertEndBound(slice, true, value2);
    // Scenario: (a, b, c, d) >= (1, 2, 3, 4) AND (a, b, c, d) <= (4, 3, 2, 1)
    // -> 7 slices combining both decompositions.
    restriction = newMultiSlice(tableMetadata, 0, Operator.GTE, value1, value2, value3, value4);
    restriction2 = newMultiSlice(tableMetadata, 0, Operator.LTE, value4, value3, value2, value1);
    restrictions = restrictions(tableMetadata, restriction, restriction2);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(7, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1);
    assertEndBound(slice, false, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2, value3);
    assertEndBound(slice, true, value1, value2, value3, value4);
    slice = slices.get(2);
    assertStartBound(slice, false, value1, value2, value3);
    assertEndBound(slice, true, value1, value2);
    slice = slices.get(3);
    assertStartBound(slice, false, value1);
    assertEndBound(slice, false, value4);
    slice = slices.get(4);
    assertStartBound(slice, true, value4, value3);
    assertEndBound(slice, false, value4, value3, value2);
    slice = slices.get(5);
    assertStartBound(slice, true, value4, value3, value2, value1);
    assertEndBound(slice, true, value4, value3, value2);
    slice = slices.get(6);
    assertStartBound(slice, false, value4, value3);
    assertEndBound(slice, true, value4);
}
276980.171121cassandra
/**
 * Exhaustively checks {@code Slice} intersection for pairs of single slices over a
 * clustering of three Int32 columns. Covers disjoint ranges, prefix (incomplete)
 * bounds, shared endpoints, containment, fully/partially unbounded slices, and
 * slices whose start sorts after their end.
 */
public void testIntersectsSingleSlice() {
    // Comparator over three Int32 clustering columns; all bounds below are inclusive.
    List<AbstractType<?>> types = new ArrayList<>();
    types.add(Int32Type.instance);
    types.add(Int32Type.instance);
    types.add(Int32Type.instance);
    ClusteringComparator cc = new ClusteringComparator(types);
    ClusteringPrefix.Kind sk = INCL_START_BOUND;
    ClusteringPrefix.Kind ek = INCL_END_BOUND;
    // Slices that end strictly before the other slice starts: no intersection,
    // including when this slice's start is unbounded or a prefix.
    Slice slice = Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 1, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 1, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 1, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    // Slices sharing the leading component(s) but whose ends still sort before
    // the other slice's start.
    slice = Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 1, 1, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 1, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 1, 1, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 1, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 1, 1, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 1, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 3, 1, 1)));
    // Touching exactly at the inclusive bound (1,1,1) counts as intersecting.
    slice = Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 3, 1, 1)));
    // Slices that start strictly after the other slice ends: no intersection,
    // including unbounded or prefix ends on this slice.
    slice = Slice.make(makeBound(sk, 4, 0, 0), makeBound(ek, 4, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 4, 0, 0), makeBound(ek));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 4, 0, 0), makeBound(ek, 1));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 4, 0), makeBound(ek, 1));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    // The other slice ends just before (or exactly at) this slice's start.
    slice = Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 2, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 0, 0)));
    slice = Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 2, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 1, 0)));
    slice = Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 2, 0, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 1, 1)));
    // Genuinely overlapping ranges, with complete, prefix, and unbounded bounds.
    slice = Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 2, 0, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk), makeBound(ek));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 2, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 0, 0, 0), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 1, 1, 0), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 1, 1), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 1, 1, 1)));
    slice = Slice.make(makeBound(sk, 1, 1, 0), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 2, 2)));
    slice = Slice.make(makeBound(sk, 1, 1), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 2, 2)));
    slice = Slice.make(makeBound(sk, 1), makeBound(ek, 1, 2));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 2, 2)));
    slice = Slice.make(makeBound(sk, 1), makeBound(ek, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 2, 2)));
    slice = Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 2, 2)));
    slice = Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 3));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 3));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 1, 1, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 1, 0, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 1, 0, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 2, 0, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    slice = Slice.make(makeBound(sk, 0), makeBound(ek, 2, 0, 0));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 0, 0)));
    // Fully or partially unbounded slices against each other.
    slice = Slice.make(makeBound(sk), makeBound(ek));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek)));
    slice = Slice.make(makeBound(sk, 1), makeBound(ek));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek)));
    slice = Slice.make(makeBound(sk, 1), makeBound(ek, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek)));
    slice = Slice.make(makeBound(sk), makeBound(ek));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek, 1)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 1));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek, 1)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 2)));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek, 2)));
    slice = Slice.make(makeBound(sk), makeBound(ek, 2));
    assertSlicesIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek, 1)));
    slice = Slice.make(makeBound(sk, 2), makeBound(ek, 3));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk), makeBound(ek, 1)));
    // Slices whose start sorts after their end never intersect anything.
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 0, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 2, 0, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 0, 0, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 1, 1, 0), makeBound(ek, 3, 0, 0)));
    slice = Slice.make(makeBound(sk, 1, 1, 1), makeBound(ek, 1, 1, 0));
    assertSlicesDoNotIntersect(cc, slice, Slice.make(makeBound(sk, 1, 0, 0), makeBound(ek, 2, 2, 2)));
}
272857.891167elasticsearch
/**
 * Verifies that the suggest lexer tokenizes declarations, array types, member
 * access chains, and a function definition into the expected alternating
 * (token display name, token text) pairs consumed by {@code compareTokens}.
 */
public void testSuggestLexer() {
    // Bare identifier.
    compareTokens(getSuggestTokens("test"), tokenName(SuggestLexer.ID), "test");
    // Declarations with a built-in, a class, and the dynamic type.
    compareTokens(getSuggestTokens("int test;"), tokenName(SuggestLexer.TYPE), "int", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.SEMICOLON), ";");
    compareTokens(getSuggestTokens("ArrayList test;"), tokenName(SuggestLexer.TYPE), "ArrayList", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.SEMICOLON), ";");
    compareTokens(getSuggestTokens("def test;"), tokenName(SuggestLexer.TYPE), "def", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.SEMICOLON), ";");
    // Array-type declarations lex as a single ATYPE token including the brackets.
    compareTokens(getSuggestTokens("int[] test;"), tokenName(SuggestLexer.ATYPE), "int[]", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.SEMICOLON), ";");
    compareTokens(getSuggestTokens("ArrayList[] test;"), tokenName(SuggestLexer.ATYPE), "ArrayList[]", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.SEMICOLON), ";");
    compareTokens(getSuggestTokens("def[] test;"), tokenName(SuggestLexer.ATYPE), "def[]", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.SEMICOLON), ";");
    // Member access: trailing '.', then '.name' (DOTID), then '.name('.
    compareTokens(getSuggestTokens("List test = new ArrayList(); test."), tokenName(SuggestLexer.TYPE), "List", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.ASSIGN), "=", tokenName(SuggestLexer.NEW), "new", tokenName(SuggestLexer.TYPE), "ArrayList", tokenName(SuggestLexer.LP), "(", tokenName(SuggestLexer.RP), ")", tokenName(SuggestLexer.SEMICOLON), ";", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.DOT), ".");
    compareTokens(getSuggestTokens("List test = new ArrayList(); test.add"), tokenName(SuggestLexer.TYPE), "List", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.ASSIGN), "=", tokenName(SuggestLexer.NEW), "new", tokenName(SuggestLexer.TYPE), "ArrayList", tokenName(SuggestLexer.LP), "(", tokenName(SuggestLexer.RP), ")", tokenName(SuggestLexer.SEMICOLON), ";", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.DOT), ".", tokenName(SuggestLexer.DOTID), "add");
    compareTokens(getSuggestTokens("List test = new ArrayList(); test.add("), tokenName(SuggestLexer.TYPE), "List", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.ASSIGN), "=", tokenName(SuggestLexer.NEW), "new", tokenName(SuggestLexer.TYPE), "ArrayList", tokenName(SuggestLexer.LP), "(", tokenName(SuggestLexer.RP), ")", tokenName(SuggestLexer.SEMICOLON), ";", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.DOT), ".", tokenName(SuggestLexer.DOTID), "add", tokenName(SuggestLexer.LP), "(");
    // Function definition followed by a call.
    compareTokens(getSuggestTokens("def test(int param) {return param;} test(2);"), tokenName(SuggestLexer.TYPE), "def", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.LP), "(", tokenName(SuggestLexer.TYPE), "int", tokenName(SuggestLexer.ID), "param", tokenName(SuggestLexer.RP), ")", tokenName(SuggestLexer.LBRACK), "{", tokenName(SuggestLexer.RETURN), "return", tokenName(SuggestLexer.ID), "param", tokenName(SuggestLexer.SEMICOLON), ";", tokenName(SuggestLexer.RBRACK), "}", tokenName(SuggestLexer.ID), "test", tokenName(SuggestLexer.LP), "(", tokenName(SuggestLexer.INTEGER), "2", tokenName(SuggestLexer.RP), ")", tokenName(SuggestLexer.SEMICOLON), ";");
}

/** Shorthand for the display name of a lexer token type in the suggest vocabulary. */
private static String tokenName(int tokenType) {
    return SuggestLexer.VOCABULARY.getDisplayName(tokenType);
}
272664.21174elasticsearch
/**
 * Parses four OGC polygons without holes. Rings that stay on one side of the
 * dateline are expected to build a single polygon, while rings whose
 * coordinates imply crossing the dateline are expected to build a
 * multi-polygon. Each GeoJSON document is parsed twice: once via
 * {@code ShapeParser}/S4J and once via {@code parse} (geometry path).
 */
public void testParseOGCPolygonWithoutHoles() throws IOException, ParseException {
    // Ring near the dateline but not touching it: a single polygon.
    String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(176.0).value(15.0).endArray().startArray().value(-177.0).value(10.0).endArray().startArray().value(-177.0).value(-10.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(172.0).value(0.0).endArray().startArray().value(176.0).value(15.0).endArray().endArray().endArray().endObject());
    assertParsesAsPolygon(polygonGeoJson);
    // Same vertices in the opposite winding order: split into a multi-polygon.
    polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(-177.0).value(10.0).endArray().startArray().value(176.0).value(15.0).endArray().startArray().value(172.0).value(0.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(-177.0).value(-10.0).endArray().startArray().value(-177.0).value(10.0).endArray().endArray().endArray().endObject());
    assertParsesAsMultiPolygon(polygonGeoJson);
    // Ring touching the dateline at +/-180: still a single polygon.
    polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(176.0).value(15.0).endArray().startArray().value(180.0).value(10.0).endArray().startArray().value(180.0).value(-10.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(172.0).value(0.0).endArray().startArray().value(176.0).value(15.0).endArray().endArray().endArray().endObject());
    assertParsesAsPolygon(polygonGeoJson);
    // Ring with longitudes beyond 180 (184): crosses the dateline, multi-polygon.
    polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon").startArray("coordinates").startArray().startArray().value(176.0).value(15.0).endArray().startArray().value(184.0).value(15.0).endArray().startArray().value(184.0).value(0.0).endArray().startArray().value(176.0).value(-15.0).endArray().startArray().value(174.0).value(-10.0).endArray().startArray().value(176.0).value(15.0).endArray().endArray().endArray().endObject());
    assertParsesAsMultiPolygon(polygonGeoJson);
}

/**
 * Parses {@code polygonGeoJson} twice and asserts both results are polygons:
 * once built via ShapeParser/S4J (flag {@code true}) and once via the direct
 * {@code parse} path (flag {@code false}), mirroring the original assertions.
 */
private void assertParsesAsPolygon(String polygonGeoJson) throws IOException, ParseException {
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) {
        parser.nextToken();
        Shape shape = ShapeParser.parse(parser).buildS4J();
        ElasticsearchGeoAssertions.assertPolygon(shape, true);
    }
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) {
        parser.nextToken();
        ElasticsearchGeoAssertions.assertPolygon(parse(parser), false);
    }
}

/** Same as {@link #assertParsesAsPolygon} but expects a multi-polygon result. */
private void assertParsesAsMultiPolygon(String polygonGeoJson) throws IOException, ParseException {
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) {
        parser.nextToken();
        Shape shape = ShapeParser.parse(parser).buildS4J();
        ElasticsearchGeoAssertions.assertMultiPolygon(shape, true);
    }
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, polygonGeoJson)) {
        parser.nextToken();
        ElasticsearchGeoAssertions.assertMultiPolygon(parse(parser), false);
    }
}
274539.492142elasticsearch
/**
 * Exercises cross-cluster search before and after the remote cluster becomes
 * unreachable, with and without {@code skip_unavailable}: checks the
 * {@code _clusters} response section, hit counts, and scroll continuation.
 * Ends by asserting that with skipping disabled the search fails, then
 * removes the remote cluster settings.
 */
public void testSearchSkipUnavailable() throws IOException {
    try (MockTransportService remoteTransport = startTransport("node0", new CopyOnWriteArrayList<>(), VersionInformation.CURRENT, TransportVersion.current(), threadPool)) {
        DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode();
        updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString()));
        // Index and refresh 10 documents on the local cluster.
        for (int i = 0; i < 10; i++) {
            Request request = new Request("POST", "/index/_doc");
            request.setJsonEntity("{ \"field\" : \"value\" }");
            Response response = client().performRequest(request);
            assertEquals(201, response.getStatusLine().getStatusCode());
        }
        Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh"));
        assertEquals(200, refreshResponse.getStatusLine().getStatusCode());
        {
            // Local-only search: no _clusters section is present.
            Response response = client().performRequest(new Request("GET", "/index/_search"));
            assertEquals(200, response.getStatusLine().getStatusCode());
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            assertNull(objectPath.evaluate("_clusters"));
            assertHitCounts(objectPath, 10, 10);
        }
        {
            // Local + remote while the remote is reachable: both clusters succeed.
            Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search"));
            assertEquals(200, response.getStatusLine().getStatusCode());
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            assertClustersSection(objectPath, 2, 2, 0);
            assertHitCounts(objectPath, 10, 10);
        }
        {
            // Remote-only search: the remote index exists but is empty.
            Response response = client().performRequest(new Request("GET", "/remote1:index/_search"));
            assertEquals(200, response.getStatusLine().getStatusCode());
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            assertClustersSection(objectPath, 1, 1, 0);
            assertHitCounts(objectPath, 0, 0);
        }
        {
            // Scroll across both clusters, then continue the scroll.
            Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m"));
            assertEquals(200, response.getStatusLine().getStatusCode());
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            assertClustersSection(objectPath, 2, 2, 0);
            assertHitCounts(objectPath, 10, 10);
            continueScrollAndAssert(objectPath.evaluate("_scroll_id"));
        }
        // Make the remote unreachable and allow it to be skipped.
        remoteTransport.close();
        updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", true));
        {
            // Local + unreachable remote: the remote cluster is skipped.
            Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search"));
            assertEquals(200, response.getStatusLine().getStatusCode());
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            assertClustersSection(objectPath, 2, 1, 1);
            assertHitCounts(objectPath, 10, 10);
        }
        {
            // Remote-only against the unreachable remote: skipped, no hits.
            Response response = client().performRequest(new Request("GET", "/remote1:index/_search"));
            assertEquals(200, response.getStatusLine().getStatusCode());
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            assertClustersSection(objectPath, 1, 0, 1);
            assertHitCounts(objectPath, 0, 0);
        }
        {
            // Scrolling still works on the local results with the remote skipped.
            Response response = client().performRequest(new Request("GET", "/index,remote1:index/_search?scroll=1m"));
            assertEquals(200, response.getStatusLine().getStatusCode());
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            assertClustersSection(objectPath, 2, 1, 1);
            assertHitCounts(objectPath, 10, 10);
            continueScrollAndAssert(objectPath.evaluate("_scroll_id"));
        }
        // With skipping disabled the unreachable remote must fail the search.
        updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", false));
        assertSearchConnectFailure();
        // Remove the remote cluster configuration (null clears a setting).
        Map<String, Object> map = new HashMap<>();
        map.put("seeds", null);
        map.put("skip_unavailable", null);
        updateRemoteClusterSettings(map);
    }
}

/**
 * Asserts the _clusters section totals; running, partial, and failed counts
 * are always expected to be 0 in this test.
 */
private static void assertClustersSection(ObjectPath objectPath, int total, int successful, int skipped) throws IOException {
    assertNotNull(objectPath.evaluate("_clusters"));
    assertThat(objectPath.evaluate("_clusters.total"), equalTo(total));
    assertThat(objectPath.evaluate("_clusters.successful"), equalTo(successful));
    assertThat(objectPath.evaluate("_clusters.skipped"), equalTo(skipped));
    assertThat(objectPath.evaluate("_clusters.running"), equalTo(0));
    assertThat(objectPath.evaluate("_clusters.partial"), equalTo(0));
    assertThat(objectPath.evaluate("_clusters.failed"), equalTo(0));
}

/** Asserts hits.total.value and the number of hits actually returned. */
private static void assertHitCounts(ObjectPath objectPath, int total, int returned) throws IOException {
    assertThat(objectPath.evaluate("hits.total.value"), equalTo(total));
    assertThat(objectPath.evaluateArraySize("hits.hits"), equalTo(returned));
}

/**
 * Continues the given scroll and asserts the follow-up page: no _clusters
 * section, the total of 10 is retained, and no further hits are returned.
 */
private void continueScrollAndAssert(String scrollId) throws IOException {
    assertNotNull(scrollId);
    Request scrollRequest = new Request("POST", "/_search/scroll");
    scrollRequest.setJsonEntity("{ \"scroll_id\" : \"" + scrollId + "\" }");
    Response scrollResponse = client().performRequest(scrollRequest);
    assertEquals(200, scrollResponse.getStatusLine().getStatusCode());
    ObjectPath scrollObjectPath = ObjectPath.createFromResponse(scrollResponse);
    assertNull(scrollObjectPath.evaluate("_clusters"));
    assertHitCounts(scrollObjectPath, 10, 0);
}
274098.481138elasticsearch
/**
 * Verifies that closed indices participate in cluster health: a healthy
 * closed index reports GREEN, a closed index with unassignable replicas
 * reports YELLOW, and IndicesOptions control whether closed indices are
 * expanded by wildcard health requests.
 */
public void testHealthWithClosedIndices() {
    createIndex("index-1");
    {
        // Baseline: a single open index, cluster GREEN (no index-count check here).
        ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get();
        assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.isTimedOut(), equalTo(false));
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    // A closed index whose shards are fully assigned stays GREEN.
    createIndex("index-2");
    assertAcked(indicesAdmin().prepareClose("index-2"));
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 2);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-1").get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 1);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-2").get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 1);
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    {
        // Default wildcard expansion includes the closed index.
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 2);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    {
        // Expanding open indices only excludes the closed one.
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 1);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-2"), nullValue());
    }
    {
        // Expanding closed indices only excludes the open one.
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true)).get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 1);
        assertThat(response.getIndices().get("index-1"), nullValue());
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    // A closed index with 50 replicas cannot fully assign, turning health YELLOW.
    createIndex("index-3", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 50).build());
    assertAcked(indicesAdmin().prepareClose("index-3"));
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).setWaitForNoInitializingShards(true).setWaitForYellowStatus().get();
        assertHealthResponse(response, ClusterHealthStatus.YELLOW, 3);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    }
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-1").get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 1);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-2").get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 1);
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-3").get();
        assertHealthResponse(response, ClusterHealthStatus.YELLOW, 1);
        assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    }
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").get();
        assertHealthResponse(response, ClusterHealthStatus.YELLOW, 3);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    }
    {
        // Open-only expansion hides both closed indices, so health is GREEN.
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 1);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-2"), nullValue());
        assertThat(response.getIndices().get("index-3"), nullValue());
    }
    {
        // Closed-only expansion shows both closed indices; index-3 drives YELLOW.
        ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true)).get();
        assertHealthResponse(response, ClusterHealthStatus.YELLOW, 2);
        assertThat(response.getIndices().get("index-1"), nullValue());
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    }
    // Restoring an assignable replica count brings index-3 (and the cluster) back to GREEN.
    setReplicaCount(numberOfReplicas(), "index-3");
    {
        ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get();
        assertHealthResponse(response, ClusterHealthStatus.GREEN, 3);
        assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.GREEN));
    }
}

/** Asserts overall status, that the request did not time out, and the number of indices reported. */
private static void assertHealthResponse(ClusterHealthResponse response, ClusterHealthStatus expectedStatus, int expectedIndexCount) {
    assertThat(response.getStatus(), equalTo(expectedStatus));
    assertThat(response.isTimedOut(), equalTo(false));
    assertThat(response.getIndices().size(), equalTo(expectedIndexCount));
}
273987.882147elasticsearch
/**
 * Verifies snapshot/restore behavior when some primary shards are unassigned:
 * a non-partial snapshot must fail outright, a partial snapshot must complete
 * in PARTIAL state, and subsequent restores must fully succeed, partially
 * succeed, or fail depending on how much of each index was actually captured.
 */
public void testRestoreIndexWithMissingShards() throws Exception {
    disableRepoConsistencyCheck("This test leaves behind a purposely broken repository");
    logger.info("--> start 2 nodes");
    internalCluster().startNodes(2);
    logger.info("--> create an index that will have some unallocated shards");
    // 6 primaries, no replicas; after one node is stopped below, the primaries
    // that lived on that node become unassigned.
    assertAcked(prepareCreate("test-idx-some", 2, indexSettingsNoReplicas(6)));
    ensureGreen();
    indexRandomDocs("test-idx-some", 100);
    logger.info("--> shutdown one of the nodes");
    internalCluster().stopRandomDataNode();
    // Wait until the cluster has observed the node leaving ("<2" nodes).
    assertThat(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(TimeValue.timeValueMinutes(1)).setWaitForNodes("<2").get().isTimedOut(), equalTo(false));
    logger.info("--> create an index that will have all allocated shards");
    assertAcked(prepareCreate("test-idx-all", 1, indexSettingsNoReplicas(6)));
    ensureGreen("test-idx-all");
    logger.info("--> create an index that will be closed");
    assertAcked(prepareCreate("test-idx-closed", 1, indexSettingsNoReplicas(4)));
    indexRandomDocs("test-idx-all", 100);
    indexRandomDocs("test-idx-closed", 100);
    assertAcked(indicesAdmin().prepareClose("test-idx-closed"));
    logger.info("--> create an index that will have no allocated shards");
    // Allocation filter targets a tag no node has, so no shard can ever be assigned;
    // ActiveShardCount.NONE lets the create call return without waiting.
    assertAcked(prepareCreate("test-idx-none", 1, indexSettingsNoReplicas(6).put("index.routing.allocation.include.tag", "nowhere")).setWaitForActiveShards(ActiveShardCount.NONE));
    assertTrue(indexExists("test-idx-none"));
    createRepository("test-repo", "fs");
    logger.info("--> start snapshot with default settings without a closed index - should fail");
    // Non-partial snapshot over indices with unassigned primaries must be rejected.
    final SnapshotException sne = expectThrows(SnapshotException.class, clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed").setWaitForCompletion(true));
    assertThat(sne.getMessage(), containsString("the following indices have unassigned primary shards"));
    // Take the same snapshot with partial=true; verify completion either via the
    // status API (async path) or via the wait_for_completion flag (sync path).
    if (randomBoolean()) {
        logger.info("checking snapshot completion using status");
        clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2").setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed").setWaitForCompletion(false).setPartial(true).get();
        assertBusy(() -> {
            SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get();
            List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
            assertEquals(snapshotStatuses.size(), 1);
            logger.trace("current snapshot status [{}]", snapshotStatuses.get(0));
            assertTrue(snapshotStatuses.get(0).getState().completed());
        }, 1, TimeUnit.MINUTES);
        SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get();
        List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
        assertThat(snapshotStatuses.size(), equalTo(1));
        SnapshotStatus snapshotStatus = snapshotStatuses.get(0);
        // 22 shards total: 6 (test-idx-all) + 6 (test-idx-none) + 6 (test-idx-some) + 4 (test-idx-closed).
        assertThat(snapshotStatus.getShardsStats().getTotalShards(), equalTo(22));
        // Done shards: all of test-idx-all (6) and test-idx-closed (4), plus only the
        // still-assigned subset of test-idx-some — presumably strictly between 10 and 16.
        assertThat(snapshotStatus.getShardsStats().getDoneShards(), lessThan(16));
        assertThat(snapshotStatus.getShardsStats().getDoneShards(), greaterThan(10));
        assertBusy(() -> {
            SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap-2");
            assertTrue(snapshotInfo.state().completed());
            assertEquals(SnapshotState.PARTIAL, snapshotInfo.state());
        }, 1, TimeUnit.MINUTES);
    } else {
        logger.info("checking snapshot completion using wait_for_completion flag");
        final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2").setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed").setWaitForCompletion(true).setPartial(true).get();
        logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
        // Same shard arithmetic as the async branch above.
        assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(22));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(16));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(10));
        assertThat(getSnapshot("test-repo", "test-snap-2").state(), equalTo(SnapshotState.PARTIAL));
    }
    // Close test-idx-all so it can be restored in place.
    assertAcked(indicesAdmin().prepareClose("test-idx-all"));
    logger.info("--> restore incomplete snapshot - should fail");
    // Restoring ALL indices of a partial snapshot without partial=true must fail.
    assertFutureThrows(clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setWaitForCompletion(true).execute(), SnapshotRestoreException.class);
    logger.info("--> restore snapshot for the index that was snapshotted completely");
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setIndices("test-idx-all").setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
    assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6));
    assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
    assertDocCount("test-idx-all", 100L);
    logger.info("--> restore snapshot for the partial index");
    cluster().wipeIndices("test-idx-some");
    // Only the shards that were captured restore successfully; doc count lands
    // strictly between 0 and the original 100.
    restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setIndices("test-idx-some").setPartial(true).setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
    assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), lessThan(6)));
    assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0));
    assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L)));
    logger.info("--> restore snapshot for the index that didn't have any shards snapshotted successfully");
    cluster().wipeIndices("test-idx-none");
    restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setIndices("test-idx-none").setPartial(true).setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
    assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(0));
    assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(6));
    // NOTE(review): this re-checks test-idx-some, not test-idx-none — presumably a
    // sanity check that the previous partial restore is untouched; confirm intent.
    assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L)));
    logger.info("--> restore snapshot for the closed index that was snapshotted completely");
    restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setIndices("test-idx-closed").setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(4));
    assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(4));
    assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
    assertDocCount("test-idx-closed", 100L);
}
274356.679122elasticsearch
/**
 * Exercises the snapshot status APIs while a snapshot is blocked mid-flight
 * (via a mock repository) and again after it completes: per-repository status,
 * all-running status, the {@code _current} pseudo-snapshot, and the
 * missing-snapshot / ignore_unavailable code paths.
 */
public void testSnapshotStatus() throws Exception {
    Client client = client();
    // Mock repository that can block a data node during snapshotting.
    createRepository("test-repo", "mock", Settings.builder().put("location", randomRepoPath()).put("random", randomAlphaOfLength(10)).put("wait_after_unblock", 200));
    assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(randomIntBetween(2, 10))));
    indexRandomDocs("test-idx", 100);
    String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
    // Resolve the blocked node's id; falls back to "" if the name is not found.
    String blockedNodeId = clusterService().state().getNodes().getDataNodes().values().stream().filter(n -> n.getName().equals(blockedNode)).map(DiscoveryNode::getId).findFirst().orElse("");
    logger.info("--> snapshot");
    client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIncludeGlobalState(false).setIndices("test-idx").get();
    logger.info("--> waiting for block to kick in");
    waitForBlock(blockedNode, "test-repo");
    awaitClusterState(state -> {
        SnapshotsInProgress snapshotsInProgress = SnapshotsInProgress.get(state);
        Set<Snapshot> snapshots = snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::snapshot).collect(Collectors.toSet());
        if (snapshots.size() != 1) {
            return false;
        }
        SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshots.iterator().next());
        for (Map.Entry<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shard : entry.shards().entrySet()) {
            // NOTE(review): parses as "(nodeId != blockedNodeId) && (state != SUCCESS)", i.e. wait
            // until every shard NOT on the blocked node is SUCCESS; the chained
            // "== ... == false" is hard to read — confirm intent before simplifying.
            if (shard.getValue().nodeId().equals(blockedNodeId) == false && shard.getValue().state() == SnapshotsInProgress.ShardState.SUCCESS == false) {
                return false;
            }
        }
        return true;
    });
    logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode);
    SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").get();
    assertThat(response.getSnapshots().size(), equalTo(1));
    SnapshotStatus snapshotStatus = response.getSnapshots().get(0);
    assertThat(snapshotStatus.getState(), equalTo(State.STARTED));
    assertThat(snapshotStatus.includeGlobalState(), equalTo(false));
    assertThat(snapshotStatus.getShardsStats().getStartedShards(), greaterThan(0));
    // STARTED shards must report the node they are running on.
    for (SnapshotIndexShardStatus shardStatus : snapshotStatus.getIndices().get("test-idx")) {
        if (shardStatus.getStage() == SnapshotIndexShardStage.STARTED) {
            assertThat(shardStatus.getNodeId(), notNullValue());
        }
    }
    logger.info("--> checking snapshot status for all currently running and snapshot with empty repository");
    // Same assertions through the no-repository ("all currently running") variant.
    response = client.admin().cluster().prepareSnapshotStatus().get();
    assertThat(response.getSnapshots().size(), equalTo(1));
    snapshotStatus = response.getSnapshots().get(0);
    assertThat(snapshotStatus.getState(), equalTo(State.STARTED));
    assertThat(snapshotStatus.includeGlobalState(), equalTo(false));
    assertThat(snapshotStatus.getShardsStats().getStartedShards(), greaterThan(0));
    for (SnapshotIndexShardStatus shardStatus : snapshotStatus.getIndices().get("test-idx")) {
        if (shardStatus.getStage() == SnapshotIndexShardStage.STARTED) {
            assertThat(shardStatus.getNodeId(), notNullValue());
        }
    }
    logger.info("--> checking that _current returns the currently running snapshot");
    GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().get();
    assertThat(getResponse.getSnapshots().size(), equalTo(1));
    SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0);
    assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS));
    // Cross-check shard counts between the get API and the status API.
    snapshotStatus = client.admin().cluster().prepareSnapshotStatus().get().getSnapshots().get(0);
    assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getTotalShards()));
    assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getDoneShards()));
    assertThat(snapshotInfo.shardFailures().size(), equalTo(0));
    logger.info("--> unblocking blocked node");
    unblockNode("test-repo", blockedNode);
    awaitNoMoreRunningOperations();
    snapshotInfo = getSnapshot("test-repo", "test-snap");
    logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
    logger.info("--> checking snapshot status again after snapshot is done");
    response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").get();
    snapshotStatus = response.getSnapshots().get(0);
    assertThat(snapshotStatus.getIndices().size(), equalTo(1));
    assertThat(snapshotStatus.includeGlobalState(), equalTo(false));
    SnapshotIndexStatus indexStatus = snapshotStatus.getIndices().get("test-idx");
    assertThat(indexStatus, notNullValue());
    assertThat(indexStatus.getShardsStats().getInitializingShards(), equalTo(0));
    assertThat(indexStatus.getShardsStats().getFailedShards(), equalTo(snapshotInfo.failedShards()));
    assertThat(indexStatus.getShardsStats().getDoneShards(), equalTo(snapshotInfo.successfulShards()));
    assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards()));
    logger.info("--> checking snapshot status after it is done with empty repository");
    // With no snapshot running, the "currently running" variant returns nothing.
    response = client.admin().cluster().prepareSnapshotStatus().get();
    assertThat(response.getSnapshots().size(), equalTo(0));
    logger.info("--> checking that _current no longer returns the snapshot");
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").get().getSnapshots().isEmpty(), equalTo(true));
    // Missing snapshot: error by default, silently skipped with ignore_unavailable.
    SnapshotMissingException ex = expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist"));
    assertEquals("[test-repo:test-snap-doesnt-exist] is missing", ex.getMessage());
    response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist").setIgnoreUnavailable(true).get();
    assertTrue(response.getSnapshots().isEmpty());
    response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap", "test-snap-doesnt-exist").setIgnoreUnavailable(true).get();
    assertEquals(1, response.getSnapshots().size());
    assertEquals("test-snap", response.getSnapshots().get(0).getSnapshot().getSnapshotId().getName());
}
271336.343109elasticsearch
/**
 * Checks whether {@code node} satisfies this collection of attribute filters.
 * <p>
 * Built-in attributes ({@code _ip}, {@code _host_ip}, {@code _publish_ip},
 * {@code _host}, {@code _id}, {@code _name}/{@code name}) are matched with
 * their specific semantics; any other key is matched against the node's
 * custom attributes. Under {@link OpType#AND} every filter must match (the
 * first failure returns {@code false}); under {@link OpType#OR} the first
 * matching filter returns {@code true}.
 * <p>
 * Refactoring note: the previously copy-pasted AND/OR short-circuit logic is
 * now centralized in {@link #decide(boolean, boolean)}, and the duplicated
 * publish-address formatting in {@link #formatPublishAddress(DiscoveryNode)}.
 *
 * @param node the node to test
 * @return {@code true} if the node matches this filter set
 */
public boolean match(DiscoveryNode node) {
    for (Map.Entry<String, String[]> entry : filters.entrySet()) {
        String attr = entry.getKey();
        String[] values = entry.getValue();
        final Boolean decision;
        if ("_ip".equals(attr)) {
            // _ip matches either the host address or the publish address.
            boolean matched = matchByIP(values, node.getHostAddress(), formatPublishAddress(node));
            decision = decide(matched, matched);
        } else if ("_host_ip".equals(attr)) {
            boolean matched = matchByIP(values, node.getHostAddress(), null);
            decision = decide(matched, matched);
        } else if ("_publish_ip".equals(attr)) {
            boolean matched = matchByIP(values, formatPublishAddress(node), null);
            decision = decide(matched, matched);
        } else if ("_host".equals(attr)) {
            // A value matches if it wildcard-matches the host name or the host address.
            boolean any = false;
            boolean all = true;
            for (String value : values) {
                boolean m = Regex.simpleMatch(value, node.getHostName()) || Regex.simpleMatch(value, node.getHostAddress());
                any |= m;
                all &= m;
            }
            decision = decide(any, all);
        } else if ("_id".equals(attr)) {
            // Node ids are compared exactly, no wildcards.
            boolean any = false;
            boolean all = true;
            for (String value : values) {
                boolean m = node.getId().equals(value);
                any |= m;
                all &= m;
            }
            decision = decide(any, all);
        } else if ("_name".equals(attr) || "name".equals(attr)) {
            boolean any = false;
            boolean all = true;
            for (String value : values) {
                boolean m = Regex.simpleMatch(value, node.getName());
                any |= m;
                all &= m;
            }
            decision = decide(any, all);
        } else {
            // Generic custom node attribute.
            String nodeAttributeValue = node.getAttributes().get(attr);
            if (nodeAttributeValue == null) {
                // Absent attribute: fatal under AND, irrelevant under OR.
                if (opType == OpType.AND) {
                    return false;
                }
                continue;
            }
            boolean any = false;
            boolean all = true;
            for (String value : values) {
                boolean m = Regex.simpleMatch(value, nodeAttributeValue);
                any |= m;
                all &= m;
            }
            decision = decide(any, all);
        }
        if (decision != null) {
            return decision;
        }
    }
    // No filter forced a verdict: under OR nothing matched (fail);
    // under AND nothing failed (pass).
    return opType != OpType.OR;
}

/**
 * Folds one filter's per-value match results into a short-circuit verdict.
 * <p>
 * Under AND, every value must have matched; under OR, one match suffices.
 * For single-boolean filters pass the same flag for both parameters.
 *
 * @param anyMatched whether at least one filter value matched
 * @param allMatched whether every filter value matched
 * @return {@code TRUE}/{@code FALSE} to end evaluation immediately, or
 *         {@code null} to continue with the next filter
 */
private Boolean decide(boolean anyMatched, boolean allMatched) {
    if (opType == OpType.AND) {
        return allMatched ? null : Boolean.FALSE;
    }
    return anyMatched ? Boolean.TRUE : null;
}

/**
 * Formats the node's publish address as a plain IP string, or returns
 * {@code null} when the node's address is not a {@link TransportAddress}.
 */
private static String formatPublishAddress(DiscoveryNode node) {
    if (node.getAddress() instanceof TransportAddress) {
        return NetworkAddress.format(node.getAddress().address().getAddress());
    }
    return null;
}
272760.5827109elasticsearch
/**
 * Parses a {@code match} query from its XContent representation.
 * <p>
 * Two syntaxes are accepted: the full object form
 * {@code {"field": {"query": ..., <options>}}} and the shorthand
 * {@code {"field": "value"}}. Exactly one field name is allowed; a second
 * field triggers {@code throwParsingExceptionOnMultipleFields}.
 *
 * @param parser positioned just inside the query's START_OBJECT
 * @return the populated {@link MatchQueryBuilder}
 * @throws IOException      on stream errors from the parser
 * @throws ParsingException on unknown options, unexpected tokens, multiple
 *                          fields, or a missing query value
 */
public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOException {
    // Accumulators for every supported option, initialized to the builder defaults.
    String fieldName = null;
    Object value = null;
    float boost = AbstractQueryBuilder.DEFAULT_BOOST;
    String minimumShouldMatch = null;
    String analyzer = null;
    Operator operator = MatchQueryBuilder.DEFAULT_OPERATOR;
    Fuzziness fuzziness = null;
    int prefixLength = FuzzyQuery.defaultPrefixLength;
    int maxExpansion = FuzzyQuery.defaultMaxExpansions;
    boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions;
    String fuzzyRewrite = null;
    boolean lenient = MatchQueryParser.DEFAULT_LENIENCY;
    ZeroTermsQueryOption zeroTermsQuery = MatchQueryParser.DEFAULT_ZERO_TERMS_QUERY;
    boolean autoGenerateSynonymsPhraseQuery = true;
    String queryName = null;
    String currentFieldName = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_OBJECT) {
            // Full form: {"field": { "query": ..., options... }}
            throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
            fieldName = currentFieldName;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if (token.isValue()) {
                    if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        value = parser.objectText();
                    } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        analyzer = parser.text();
                    } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        boost = parser.floatValue();
                    } else if (Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        fuzziness = Fuzziness.parse(parser);
                    } else if (PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        prefixLength = parser.intValue();
                    } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        maxExpansion = parser.intValue();
                    } else if (OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        operator = Operator.fromString(parser.text());
                    } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        minimumShouldMatch = parser.textOrNull();
                    } else if (FUZZY_REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        fuzzyRewrite = parser.textOrNull();
                    } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        fuzzyTranspositions = parser.booleanValue();
                    } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        lenient = parser.booleanValue();
                    } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        // Only "none" and "all" are accepted (case-insensitive).
                        String zeroTermsValue = parser.text();
                        if ("none".equalsIgnoreCase(zeroTermsValue)) {
                            zeroTermsQuery = ZeroTermsQueryOption.NONE;
                        } else if ("all".equalsIgnoreCase(zeroTermsValue)) {
                            zeroTermsQuery = ZeroTermsQueryOption.ALL;
                        } else {
                            throw new ParsingException(parser.getTokenLocation(), "Unsupported zero_terms_query value [" + zeroTermsValue + "]");
                        }
                    } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        queryName = parser.text();
                    } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) {
                        autoGenerateSynonymsPhraseQuery = parser.booleanValue();
                    } else if (parser.getRestApiVersion() == RestApiVersion.V_7 && CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                        // cutoff_frequency was removed; under the v7 compat layer it is an explicit error.
                        throw new ParsingException(parser.getTokenLocation(), CUTOFF_FREQUENCY_DEPRECATION_MSG);
                    } else {
                        throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] query does not support [" + currentFieldName + "]");
                    }
                } else {
                    throw new ParsingException(parser.getTokenLocation(), "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
                }
            }
        } else {
            // Shorthand form: {"field": "value"} — all options keep their defaults.
            throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName());
            fieldName = parser.currentName();
            value = parser.objectText();
        }
    }
    if (value == null) {
        throw new ParsingException(parser.getTokenLocation(), "No text specified for text query");
    }
    // Transfer the accumulated options onto the builder.
    MatchQueryBuilder matchQuery = new MatchQueryBuilder(fieldName, value);
    matchQuery.operator(operator);
    matchQuery.analyzer(analyzer);
    matchQuery.minimumShouldMatch(minimumShouldMatch);
    if (fuzziness != null) {
        matchQuery.fuzziness(fuzziness);
    }
    matchQuery.fuzzyRewrite(fuzzyRewrite);
    matchQuery.prefixLength(prefixLength);
    matchQuery.fuzzyTranspositions(fuzzyTranspositions);
    matchQuery.maxExpansions(maxExpansion);
    matchQuery.lenient(lenient);
    matchQuery.zeroTermsQuery(zeroTermsQuery);
    matchQuery.autoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery);
    matchQuery.queryName(queryName);
    matchQuery.boost(boost);
    return matchQuery;
}
272904.627106elasticsearch
/**
 * Parses a {@code simple_query_string} query from its XContent representation.
 * <p>
 * Fixes versus the previous revision: the local {@code fuzzyPrefixLenght}
 * misspelling is corrected to {@code fuzzyPrefixLength}, and a redundant
 * second {@code qb.boost(boost)} call has been removed — boost is already
 * applied in the chained setter line above, and re-applying the same value
 * was a no-op.
 *
 * @param parser positioned just inside the query's START_OBJECT
 * @return the populated {@link SimpleQueryStringBuilder}
 * @throws IOException      on stream errors from the parser
 * @throws ParsingException on unsupported fields, unexpected tokens, or a
 *                          missing query string
 */
public static SimpleQueryStringBuilder fromXContent(XContentParser parser) throws IOException {
    // Accumulators for every supported option, initialized to the builder defaults.
    String currentFieldName = null;
    String queryBody = null;
    float boost = AbstractQueryBuilder.DEFAULT_BOOST;
    String queryName = null;
    String minimumShouldMatch = null;
    Map<String, Float> fieldsAndWeights = null;
    Operator defaultOperator = null;
    String analyzerName = null;
    int flags = SimpleQueryStringFlag.ALL.value();
    Boolean lenient = null;
    boolean analyzeWildcard = SimpleQueryStringBuilder.DEFAULT_ANALYZE_WILDCARD;
    String quoteFieldSuffix = null;
    boolean autoGenerateSynonymsPhraseQuery = true;
    int fuzzyPrefixLength = SimpleQueryStringBuilder.DEFAULT_FUZZY_PREFIX_LENGTH;
    int fuzzyMaxExpansions = SimpleQueryStringBuilder.DEFAULT_FUZZY_MAX_EXPANSIONS;
    boolean fuzzyTranspositions = SimpleQueryStringBuilder.DEFAULT_FUZZY_TRANSPOSITIONS;
    MultiMatchQueryBuilder.Type type = SimpleQueryStringBuilder.DEFAULT_TYPE;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_ARRAY) {
            // The only array-valued option is "fields": field names with optional ^weight suffixes.
            if (FIELDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                List<String> fields = new ArrayList<>();
                while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
                    fields.add(parser.text());
                }
                fieldsAndWeights = QueryParserHelper.parseFieldsAndWeights(fields);
            } else {
                throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME + "] query does not support [" + currentFieldName + "]");
            }
        } else if (token.isValue()) {
            if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                queryBody = parser.text();
            } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                boost = parser.floatValue();
            } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                analyzerName = parser.text();
            } else if (DEFAULT_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                defaultOperator = Operator.fromString(parser.text());
            } else if (FLAGS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                // Flags come either as a pipe-separated string or as a raw bit mask.
                if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) {
                    flags = SimpleQueryStringFlag.resolveFlags(parser.text());
                } else {
                    flags = parser.intValue();
                    if (flags < 0) {
                        // Negative masks are treated as "all flags".
                        flags = SimpleQueryStringFlag.ALL.value();
                    }
                }
            } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                lenient = parser.booleanValue();
            } else if (ANALYZE_WILDCARD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                analyzeWildcard = parser.booleanValue();
            } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                queryName = parser.text();
            } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                minimumShouldMatch = parser.textOrNull();
            } else if (QUOTE_FIELD_SUFFIX_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                quoteFieldSuffix = parser.textOrNull();
            } else if (GENERATE_SYNONYMS_PHRASE_QUERY.match(currentFieldName, parser.getDeprecationHandler())) {
                autoGenerateSynonymsPhraseQuery = parser.booleanValue();
            } else if (FUZZY_PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzzyPrefixLength = parser.intValue();
            } else if (FUZZY_MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzzyMaxExpansions = parser.intValue();
            } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                fuzzyTranspositions = parser.booleanValue();
            } else if (TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                type = MultiMatchQueryBuilder.Type.parse(parser.text(), parser.getDeprecationHandler());
            } else {
                throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME + "] unsupported field [" + parser.currentName() + "]");
            }
        } else {
            throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
        }
    }
    if (queryBody == null) {
        throw new ParsingException(parser.getTokenLocation(), "[" + SimpleQueryStringBuilder.NAME + "] query text missing");
    }
    // Transfer the accumulated options onto the builder.
    SimpleQueryStringBuilder qb = new SimpleQueryStringBuilder(queryBody);
    if (fieldsAndWeights != null) {
        qb.fields(fieldsAndWeights);
    }
    qb.boost(boost).analyzer(analyzerName).queryName(queryName).minimumShouldMatch(minimumShouldMatch);
    qb.flags(flags).defaultOperator(defaultOperator);
    if (lenient != null) {
        qb.lenient(lenient);
    }
    qb.analyzeWildcard(analyzeWildcard).quoteFieldSuffix(quoteFieldSuffix);
    qb.autoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery);
    qb.fuzzyPrefixLength(fuzzyPrefixLength);
    qb.fuzzyMaxExpansions(fuzzyMaxExpansions);
    qb.fuzzyTranspositions(fuzzyTranspositions);
    qb.type(type);
    return qb;
}
272956.6717129elasticsearch
/**
 * Builds highlighted fragments for a single field using Lucene's
 * {@code FastVectorHighlighter} (FVH).
 *
 * <p>Per-request state is kept in {@code fieldContext.cache} under {@code CACHE_KEY}: a single
 * shared FVH instance plus, per {@code MappedFieldType}, a {@code FieldHighlightEntry} holding
 * the pre-built {@code FieldQuery}, {@code FragListBuilder} and fragments-builder supplier so
 * they are constructed only once per request.
 *
 * @param fieldContext carries the field options, query, hit context and per-request cache
 * @return the highlighted field, or {@code null} when no fragments could be produced
 * @throws IOException on index access failure
 * @throws IllegalArgumentException if the field was not indexed with term vectors with
 *         position offsets, which the fast vector highlighter requires
 */
public HighlightField highlight(FieldHighlightContext fieldContext) throws IOException {
    SearchHighlightContext.Field field = fieldContext.field;
    FetchSubPhase.HitContext hitContext = fieldContext.hitContext;
    MappedFieldType fieldType = fieldContext.fieldType;
    boolean fixBrokenAnalysis = fieldContext.context.containsBrokenAnalysis(fieldContext.fieldName);
    // FVH only works on fields indexed with term vectors + position offsets; fail fast otherwise.
    if (canHighlight(fieldType) == false) {
        throw new IllegalArgumentException("the field [" + fieldContext.fieldName + "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
    }
    Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
    // Lazily create the per-request cache entry shared across all fields and hits of this request.
    if (fieldContext.cache.containsKey(CACHE_KEY) == false) {
        fieldContext.cache.put(CACHE_KEY, new HighlighterEntry());
    }
    HighlighterEntry cache = (HighlighterEntry) fieldContext.cache.get(CACHE_KEY);
    FieldHighlightEntry entry = cache.fields.get(fieldType);
    if (entry == null) {
        // First time this field type is highlighted in the request: build the fragment
        // machinery and the appropriate (field-match or not) FieldQuery, then cache them.
        FragListBuilder fragListBuilder;
        if (field.fieldOptions().numberOfFragments() == 0) {
            // numberOfFragments == 0 means "return the whole field as a single fragment".
            fragListBuilder = new SingleFragListBuilder();
        } else {
            fragListBuilder = field.fieldOptions().fragmentOffset() == -1 ? new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());
        }
        Function<Source, FragmentsBuilder> fragmentsBuilderSupplier = fragmentsBuilderSupplier(field, fieldType, fieldContext.context, fixBrokenAnalysis);
        entry = new FieldHighlightEntry();
        // Only one of the two FieldQuery slots is populated, matching requireFieldMatch().
        if (field.fieldOptions().requireFieldMatch()) {
            entry.fieldMatchFieldQuery = new CustomFieldQuery(fieldContext.query, hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
        } else {
            entry.noFieldMatchFieldQuery = new CustomFieldQuery(fieldContext.query, hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
        }
        entry.fragListBuilder = fragListBuilder;
        entry.fragmentsBuilderSupplier = fragmentsBuilderSupplier;
        // The FVH instance itself is shared for the whole request.
        if (cache.fvh == null) {
            cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();
        }
        // NOTE(review): set only when a fresh entry is built — presumably a thread-local
        // toggle consumed while building the CustomFieldQuery above; confirm its lifecycle.
        CustomFieldQuery.highlightFilters.set(field.fieldOptions().highlightFilter());
        cache.fields.put(fieldType, entry);
    }
    final FieldQuery fieldQuery;
    if (field.fieldOptions().requireFieldMatch()) {
        fieldQuery = entry.fieldMatchFieldQuery;
    } else {
        fieldQuery = entry.noFieldMatchFieldQuery;
    }
    cache.fvh.setPhraseLimit(field.fieldOptions().phraseLimit());
    String[] fragments;
    FragmentsBuilder fragmentsBuilder = entry.fragmentsBuilderSupplier.apply(hitContext.source());
    // With numberOfFragments == 0 the single-fragment mode uses effectively unbounded sizes.
    int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ? Integer.MAX_VALUE : field.fieldOptions().numberOfFragments();
    int fragmentCharSize = field.fieldOptions().numberOfFragments() == 0 ? Integer.MAX_VALUE : field.fieldOptions().fragmentCharSize();
    // The matchedFields variant lets hits on sibling fields contribute to the highlight.
    if (field.fieldOptions().matchedFields() != null && field.fieldOptions().matchedFields().isEmpty() == false) {
        fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), fieldType.name(), field.fieldOptions().matchedFields(), fragmentCharSize, numberOfFragments, entry.fragListBuilder, fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
    } else {
        fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(), fieldType.name(), fragmentCharSize, numberOfFragments, entry.fragListBuilder, fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
    }
    if (CollectionUtils.isEmpty(fragments) == false) {
        return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments));
    }
    // No query match: optionally return a leading snippet of noMatchSize characters instead.
    int noMatchSize = fieldContext.field.fieldOptions().noMatchSize();
    if (noMatchSize > 0) {
        FieldFragList fieldFragList = new SimpleFieldFragList(-1);
        fieldFragList.add(0, noMatchSize, Collections.emptyList());
        fragments = fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), fieldType.name(), fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
        if (CollectionUtils.isEmpty(fragments) == false) {
            return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments));
        }
    }
    // Nothing to highlight for this field.
    return null;
}
271684.5311174elasticsearch
/**
 * Drives a {@code SearchQueryThenFetchAsyncAction} over a randomized number of mock shards,
 * each returning a single hit sorted by a "timestamp" long field, then reduces the results.
 *
 * <p>Verifies (per the assertions below): every shard responds successfully; the
 * "can return null response if no docs match" flag is only sent when not scrolling;
 * bottom-sort-value shortcutting ({@code getBottomSortValues}) is only observed for the
 * non-scroll, non-collapse case; and the reduced phase reports the expected total hits.
 *
 * @param withScroll   run as a scroll request (exact total hits, no null-response shortcut)
 * @param withCollapse add a field collapse, which returns {@code TopFieldGroups} per shard
 */
private void testCase(boolean withScroll, boolean withCollapse) throws Exception {
    final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime);
    Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>();
    DiscoveryNode primaryNode = DiscoveryNodeUtils.create("node1");
    DiscoveryNode replicaNode = DiscoveryNodeUtils.create("node2");
    lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode));
    lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
    int numShards = randomIntBetween(10, 20);
    int numConcurrent = randomIntBetween(1, 4);
    // Counters observed by the mock transport below and asserted on at the end.
    AtomicInteger numWithTopDocs = new AtomicInteger();
    AtomicInteger successfulOps = new AtomicInteger();
    AtomicBoolean canReturnNullResponse = new AtomicBoolean(false);
    SearchTransportService searchTransportService = new SearchTransportService(null, null, null) {

        @Override
        public void sendExecuteQuery(Transport.Connection connection, ShardSearchRequest request, SearchTask task, SearchActionListener<? super SearchPhaseResult> listener) {
            int shardId = request.shardId().id();
            if (request.canReturnNullResponseIfMatchNoDocs()) {
                canReturnNullResponse.set(true);
            }
            // When the coordinator forwards bottom sort values, they must never equal this
            // shard's own sentinel sort value (each shard reports its shard id as the value).
            if (request.getBottomSortValues() != null) {
                assertNotEquals(shardId, (int) request.getBottomSortValues().getFormattedSortValues()[0]);
                numWithTopDocs.incrementAndGet();
            }
            QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("N/A", 123), new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null), null);
            try {
                SortField sortField = new SortField("timestamp", SortField.Type.LONG);
                if (withCollapse) {
                    queryResult.topDocs(new TopDocsAndMaxScore(new TopFieldGroups("collapse_field", new TotalHits(1, withScroll ? TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) }, new SortField[] { sortField }, new Object[] { 0L }), Float.NaN), new DocValueFormat[] { DocValueFormat.RAW });
                } else {
                    queryResult.topDocs(new TopDocsAndMaxScore(new TopFieldDocs(new TotalHits(1, withScroll ? TotalHits.Relation.EQUAL_TO : TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { request.shardId().id() }) }, new SortField[] { sortField }), Float.NaN), new DocValueFormat[] { DocValueFormat.RAW });
                }
                queryResult.from(0);
                queryResult.size(1);
                successfulOps.incrementAndGet();
                // Respond on a separate thread; incRef keeps the result alive until then,
                // balanced by the decRef in the finally block below.
                queryResult.incRef();
                new Thread(() -> ActionListener.respondAndRelease(listener, queryResult)).start();
            } finally {
                queryResult.decRef();
            }
        }
    };
    CountDownLatch latch = new CountDownLatch(1);
    GroupShardsIterator<SearchShardIterator> shardsIter = SearchAsyncActionTests.getShardsIter("idx", new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS), numShards, randomBoolean(), primaryNode, replicaNode);
    final SearchRequest searchRequest = new SearchRequest();
    searchRequest.setMaxConcurrentShardRequests(numConcurrent);
    searchRequest.setBatchedReduceSize(2);
    searchRequest.source(new SearchSourceBuilder().size(1).sort(SortBuilders.fieldSort("timestamp")));
    if (withScroll) {
        searchRequest.scroll(TimeValue.timeValueMillis(100));
    } else {
        // Capped total-hits tracking enables the match-no-docs / bottom-sort optimizations.
        searchRequest.source().trackTotalHitsUpTo(2);
    }
    if (withCollapse) {
        searchRequest.source().collapse(new CollapseBuilder("collapse_field"));
    }
    searchRequest.allowPartialSearchResults(false);
    SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder());
    SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap());
    try (QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer(searchRequest, EsExecutors.DIRECT_EXECUTOR_SERVICE, new NoopCircuitBreaker(CircuitBreaker.REQUEST), controller, task::isCancelled, task.getProgressListener(), shardsIter.size(), exc -> {
    })) {
        SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction(logger, null, searchTransportService, (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", AliasFilter.EMPTY), Collections.emptyMap(), EsExecutors.DIRECT_EXECUTOR_SERVICE, resultConsumer, searchRequest, null, shardsIter, timeProvider, new ClusterState.Builder(new ClusterName("test")).build(), task, SearchResponse.Clusters.EMPTY) {

            @Override
            protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
                // Replace the fetch phase with a no-op that just signals query-phase completion.
                return new SearchPhase("test") {

                    @Override
                    public void run() {
                        latch.countDown();
                    }
                };
            }
        };
        action.start();
        latch.await();
        assertThat(successfulOps.get(), equalTo(numShards));
        if (withScroll) {
            // Scroll requests may not shortcut: no null responses, no bottom-sort forwarding.
            assertFalse(canReturnNullResponse.get());
            assertThat(numWithTopDocs.get(), equalTo(0));
        } else {
            assertTrue(canReturnNullResponse.get());
            if (withCollapse) {
                assertThat(numWithTopDocs.get(), equalTo(0));
            } else {
                assertThat(numWithTopDocs.get(), greaterThanOrEqualTo(1));
            }
        }
        SearchPhaseController.ReducedQueryPhase phase = action.results.reduce();
        assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1));
        if (withScroll) {
            // Scroll tracks hits exactly: one hit per shard.
            assertThat(phase.totalHits().value, equalTo((long) numShards));
            assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.EQUAL_TO));
        } else {
            // trackTotalHitsUpTo(2) caps the reported total at 2 with a lower-bound relation.
            assertThat(phase.totalHits().value, equalTo(2L));
            assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO));
        }
        assertThat(phase.sortedTopDocs().scoreDocs().length, equalTo(1));
        assertThat(phase.sortedTopDocs().scoreDocs()[0], instanceOf(FieldDoc.class));
        assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields.length, equalTo(1));
        // The globally best doc is the one whose sort value (its shard id) is 0.
        assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields[0], equalTo(0));
    }
}
273752.08136elasticsearch
/**
 * Runs {@code TransportSearchAction.ccsRemoteReduce} against a randomized set of mock remote
 * clusters (optionally plus the local cluster) and verifies the merged {@code SearchResponse}:
 * every cluster is reported successful, no cluster is skipped/running/partial/failed, and the
 * number of reduce phases matches the cluster count. Two request shapes are exercised: a
 * default {@code SearchRequest} and one with an explicit source ({@code from}/{@code size}).
 */
public void testCCSRemoteReduce() throws Exception {
    int numClusters = randomIntBetween(1, 10);
    DiscoveryNode[] nodes = new DiscoveryNode[numClusters];
    Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
    Settings.Builder builder = Settings.builder();
    boolean skipUnavailable = randomBoolean();
    MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable);
    Settings settings = builder.build();
    // Randomly include the local cluster in the search alongside the remotes.
    boolean local = randomBoolean();
    OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null;
    int totalClusters = numClusters + (local ? 1 : 0);
    TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0);
    ResolvedIndices mockResolvedIndices = createMockResolvedIndices(localIndices, remoteIndicesByCluster);
    try (MockTransportService service = MockTransportService.createNewService(settings, VersionInformation.CURRENT, TransportVersion.current(), threadPool, null)) {
        service.start();
        service.acceptIncomingRequests();
        RemoteClusterService remoteClusterService = service.getRemoteClusterService();
        // Case 1: default search request.
        runCCSRemoteReduceCase(new SearchRequest(), mockResolvedIndices, localIndices, remoteIndicesByCluster, remoteClusterService, timeProvider, totalClusters);
        // Case 2: request with explicit index patterns and a source carrying from/size.
        runCCSRemoteReduceCase(new SearchRequest(new String[] { "*", "*:*" }, new SearchSourceBuilder().from(5).size(6)), mockResolvedIndices, localIndices, remoteIndicesByCluster, remoteClusterService, timeProvider, totalClusters);
    } finally {
        for (MockTransportService mockTransportService : mockTransportServices) {
            mockTransportService.close();
        }
    }
}

/**
 * Executes one ccsRemoteReduce round for the given request and asserts that all clusters
 * complete successfully with the expected reduce-phase count.
 *
 * @param searchRequest          the request shape under test
 * @param mockResolvedIndices    resolved local/remote indices handed to ccsRemoteReduce
 * @param localIndices           local indices, or {@code null} when no local search is requested
 * @param remoteIndicesByCluster remote indices keyed by cluster alias
 * @param remoteClusterService   remote cluster service of the started mock transport
 * @param timeProvider           fixed time provider (all zeros)
 * @param totalClusters          remote cluster count plus one if local indices are present
 */
private void runCCSRemoteReduceCase(SearchRequest searchRequest, ResolvedIndices mockResolvedIndices, OriginalIndices localIndices, Map<String, OriginalIndices> remoteIndicesByCluster, RemoteClusterService remoteClusterService, TransportSearchAction.SearchTimeProvider timeProvider, int totalClusters) throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    // Captures the (request, listener) pair handed to the local-search callback, if any.
    SetOnce<Tuple<SearchRequest, ActionListener<SearchResponse>>> setOnce = new SetOnce<>();
    final SetOnce<SearchResponse> response = new SetOnce<>();
    LatchedActionListener<SearchResponse> listener = new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(newValue -> {
        // incRef keeps the response alive past the listener; balanced by decRef below.
        newValue.incRef();
        response.set(newValue);
    }), latch);
    TaskId parentTaskId = new TaskId("n", 1);
    SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap());
    TransportSearchAction.ccsRemoteReduce(task, parentTaskId, searchRequest, mockResolvedIndices, new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), timeProvider, emptyReduceContextBuilder(), remoteClusterService, threadPool, listener, (r, l) -> setOnce.set(Tuple.tuple(r, l)));
    if (localIndices == null) {
        // No local search requested, so the local-search callback must not have fired.
        assertNull(setOnce.get());
    } else {
        Tuple<SearchRequest, ActionListener<SearchResponse>> tuple = setOnce.get();
        assertEquals("", tuple.v1().getLocalClusterAlias());
        assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class));
        resolveWithEmptySearchResponse(tuple);
    }
    awaitLatch(latch, 5, TimeUnit.SECONDS);
    SearchResponse searchResponse = response.get();
    try {
        assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED));
        assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING));
        assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL));
        assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED));
        assertEquals(totalClusters, searchResponse.getClusters().getTotal());
        assertEquals(totalClusters, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL));
        // A single cluster reduces once; N > 1 clusters perform N reduces plus the final merge.
        assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases());
    } finally {
        searchResponse.decRef();
    }
}
271227.0111178elasticsearch
/**
 * Checks that {@code DiskThresholdMonitor} marks indices hosted on a flood-staged node as
 * read-only, does not repeat the work when identical disk info arrives again, and skips
 * indices that already carry the read-only-allow-delete block.
 *
 * <p>Setup: "test" lives on node2, "test_1" and "test_2" on node1, "frozen" on a
 * frozen-only node. node1 is kept nearly full so its indices are the ones marked.
 *
 * @param testMaxHeadroom when true, sizes disks in the multi-TB range so the max-headroom
 *        variants of the thresholds apply; when false, uses tiny byte counts
 */
private void doTestMarkFloodStageIndicesReadOnly(boolean testMaxHeadroom) {
    AllocationService allocation = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
    // Pin each index to a specific node via routing allocation filters.
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current()).put("index.routing.allocation.require._id", "node2")).numberOfShards(1).numberOfReplicas(0)).put(IndexMetadata.builder("test_1").settings(settings(IndexVersion.current()).put("index.routing.allocation.require._id", "node1")).numberOfShards(1).numberOfReplicas(0)).put(IndexMetadata.builder("test_2").settings(settings(IndexVersion.current()).put("index.routing.allocation.require._id", "node1")).numberOfShards(1).numberOfReplicas(0)).put(IndexMetadata.builder("frozen").settings(settings(IndexVersion.current()).put("index.routing.allocation.require._id", "frozen")).numberOfShards(1).numberOfReplicas(0)).build();
    RoutingTable routingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).addAsNew(metadata.index("test_1")).addAsNew(metadata.index("test_2")).addAsNew(metadata.index("frozen")).build();
    final ClusterState clusterState = applyStartedShardsUntilNoChange(ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).nodes(DiscoveryNodes.builder().add(newNormalNode("node1")).add(newNormalNode("node2")).add(newFrozenOnlyNode("frozen"))).build(), allocation);
    // Captured by the monitor overrides below: whether a reroute was requested and which
    // indices were asked to be marked read-only.
    AtomicBoolean reroute = new AtomicBoolean(false);
    AtomicReference<Set<String>> indices = new AtomicReference<>();
    AtomicLong currentTime = new AtomicLong();
    DiskThresholdMonitor monitor = new DiskThresholdMonitor(Settings.EMPTY, () -> clusterState, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get, (reason, priority, listener) -> {
        assertTrue(reroute.compareAndSet(false, true));
        assertThat(priority, equalTo(Priority.HIGH));
        listener.onResponse(null);
    }) {

        @Override
        protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, Releasable onCompletion, boolean readOnly) {
            assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly));
            assertTrue(readOnly);
            onCompletion.close();
        }
    };
    final long totalBytes = testMaxHeadroom ? ByteSizeValue.ofGb(10000).getBytes() : 100;
    // node1 nearly full (flood stage), node2 comfortable, frozen random.
    Map<String, DiskUsage> builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(99).getBytes() : 4));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(250).getBytes() : 30));
    builder.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 10000)).getBytes() : between(0, 100)));
    final ClusterInfo initialClusterInfo = clusterInfo(builder);
    monitor.onNewInfo(initialClusterInfo);
    // node1's indices get marked read-only and a reroute is triggered.
    assertTrue(reroute.get());
    assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indices.get());
    indices.set(null);
    reroute.set(false);
    // Re-sending identical disk info must not trigger another reroute.
    monitor.onNewInfo(initialClusterInfo);
    assertFalse(reroute.get());
    indices.set(null);
    // node2 now also short on space; after the time advance the monitor acts again.
    builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(99).getBytes() : 4));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(100).getBytes() : 5));
    builder.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 19)).getBytes() : between(0, 4)));
    currentTime.addAndGet(randomLongBetween(60000, 120000));
    monitor.onNewInfo(clusterInfo(builder));
    assertTrue(reroute.get());
    assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indices.get());
    // Pre-apply the read-only-allow-delete block to test_2; the monitor must then skip it.
    IndexMetadata indexMetadata = IndexMetadata.builder(clusterState.metadata().index("test_2")).settings(Settings.builder().put(clusterState.metadata().index("test_2").getSettings()).put(IndexMetadata.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true)).build();
    final ClusterState anotherFinalClusterState = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(clusterState.metadata().index("test"), false).put(clusterState.metadata().index("test_1"), false).put(indexMetadata, true).build()).blocks(ClusterBlocks.builder().addBlocks(indexMetadata).build()).build();
    assertTrue(anotherFinalClusterState.blocks().indexBlocked(ClusterBlockLevel.WRITE, "test_2"));
    monitor = new DiskThresholdMonitor(Settings.EMPTY, () -> anotherFinalClusterState, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, currentTime::get, (reason, priority, listener) -> {
        assertTrue(reroute.compareAndSet(false, true));
        assertThat(priority, equalTo(Priority.HIGH));
        listener.onResponse(null);
    }) {

        @Override
        protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, Releasable onCompletion, boolean readOnly) {
            assertTrue(indices.compareAndSet(null, indicesToMarkReadOnly));
            assertTrue(readOnly);
            onCompletion.close();
        }
    };
    indices.set(null);
    reroute.set(false);
    builder = new HashMap<>();
    builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(99).getBytes() : 4));
    builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(100).getBytes() : 5));
    builder.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", totalBytes, testMaxHeadroom ? ByteSizeValue.ofGb(between(0, 19)).getBytes() : between(0, 4)));
    monitor.onNewInfo(clusterInfo(builder));
    assertTrue(reroute.get());
    // Only test_1 is marked now: test_2 already carries the block, test lives on node2.
    assertEquals(Collections.singleton("test_1"), indices.get());
}
275303.491137elasticsearch
/**
 * Balancing scenario over three nodes: start an index with 3 shards + 1 replica, verify an
 * even spread, add a second identical index, verify it also balances evenly, then kill the
 * node holding a primary and verify recovery converges. After each allocation step the
 * routing-node invariants (inactive shards/primaries, unassigned primaries) are asserted
 * via {@link #assertRoutingNodes}.
 */
public void testBalanceAllNodesStartedAddIndex() {
    AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 1).put("cluster.routing.allocation.node_initial_primaries_recoveries", 3).put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(3).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    logger.info("Adding three node and performing rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
    // Nothing allocated yet: only unassigned primaries.
    RoutingNodes routingNodes = assertRoutingNodes(clusterState, false, false, true);
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    // Primaries are now initializing.
    routingNodes = assertRoutingNodes(clusterState, true, true, false);
    logger.info("Another round of rebalancing");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
    ClusterState newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    routingNodes = clusterState.getRoutingNodes();
    assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), equalTo(1));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    // Primaries started; replicas are the remaining inactive shards.
    routingNodes = assertRoutingNodes(clusterState, true, false, false);
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
    logger.info("Reroute, nothing should change");
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    logger.info("Start the more shards");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    // Everything for "test" is started: two shards (one primary, one replica) per node.
    routingNodes = assertRoutingNodes(clusterState, false, false, false);
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
    assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).count(), equalTo(2L));
    logger.info("Add new index 3 shards 1 replica");
    metadata = Metadata.builder(clusterState.metadata()).put(IndexMetadata.builder("test1").settings(indexSettings(IndexVersion.current(), 3, 1))).build();
    RoutingTable updatedRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState.routingTable()).addAsNew(metadata.index("test1")).build();
    clusterState = ClusterState.builder(clusterState).metadata(metadata).routingTable(updatedRoutingTable).build();
    // New index "test1" contributes fresh unassigned primaries.
    routingNodes = assertRoutingNodes(clusterState, false, false, true);
    assertThat(clusterState.routingTable().index("test1").size(), equalTo(3));
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    logger.info("Reroute, assign");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    routingNodes = assertRoutingNodes(clusterState, true, true, false);
    logger.info("Reroute, start the primaries");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    routingNodes = assertRoutingNodes(clusterState, true, false, false);
    logger.info("Reroute, start the replicas");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    // Both indices fully started: four shards per node, two of them from "test1".
    routingNodes = assertRoutingNodes(clusterState, false, false, false);
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).count(), equalTo(2L));
    logger.info("kill one node");
    IndexShardRoutingTable indexShardRoutingTable = clusterState.routingTable().index("test").shard(0);
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
    clusterState = strategy.disassociateDeadNodes(clusterState, true, "reroute");
    // The replica was promoted, so no inactive primaries; recoveries are in flight.
    routingNodes = assertRoutingNodes(clusterState, true, false, false);
    logger.info("Start Recovering shards round 1");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    routingNodes = assertRoutingNodes(clusterState, true, false, false);
    logger.info("Start Recovering shards round 2");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    // Fully recovered: no inactive or unassigned shards remain.
    routingNodes = assertRoutingNodes(clusterState, false, false, false);
}

/**
 * Asserts routing-node internal consistency plus the expected inactive/unassigned flags,
 * returning the {@link RoutingNodes} for follow-up per-node assertions.
 */
private RoutingNodes assertRoutingNodes(ClusterState clusterState, boolean hasInactiveShards, boolean hasInactivePrimaries, boolean hasUnassignedPrimaries) {
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    assertThat(assertShardStats(routingNodes), equalTo(true));
    assertThat(routingNodes.hasInactiveShards(), equalTo(hasInactiveShards));
    assertThat(routingNodes.hasInactivePrimaries(), equalTo(hasInactivePrimaries));
    assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(hasUnassignedPrimaries));
    return routingNodes;
}
272958.0215130elasticsearch
/**
 * Verifies that {@code SparseFileTracker#waitForRange} and {@code SparseFileTracker#waitForRangeIfPending}
 * notify their listeners at the right moment: synchronously when every byte of the requested sub-range is
 * already available, and otherwise only once progress reaches the last still-unavailable byte of the sub-range.
 */
public void testCallsListenerWhenRangeIsAvailable() {
    // Backing "file" whose bytes flip from UNAVAILABLE to AVAILABLE as gaps are processed.
    final byte[] fileContents = new byte[between(0, 1000)];
    final SparseFileTracker sparseFileTracker = new SparseFileTracker("test", fileContents.length);
    // Seed the tracker with a random number of fully-processed ranges; their listeners must all have fired.
    final Set<AtomicBoolean> listenersCalled = new HashSet<>();
    for (int i = between(0, 10); i > 0; i--) {
        waitForRandomRange(fileContents, sparseFileTracker, listenersCalled::add, gap -> processGap(fileContents, gap));
        assertTrue(listenersCalled.stream().allMatch(AtomicBoolean::get));
    }
    // Pick a random range within the file...
    final ByteRange range;
    {
        final long start = randomLongBetween(0L, Math.max(0L, fileContents.length - 1));
        range = ByteRange.of(start, randomLongBetween(start, fileContents.length));
    }
    // ...and a non-empty sub-range of it (when the range has length <= 1, the sub-range is the range itself).
    final ByteRange subRange;
    {
        if (range.length() > 1L) {
            final long start = randomLongBetween(range.start(), range.end() - 1L);
            subRange = ByteRange.of(start, randomLongBetween(start + 1L, range.end()));
        } else {
            subRange = ByteRange.of(range.start(), range.end());
        }
    }
    // Is any byte of the sub-range still missing?
    boolean pending = false;
    for (long i = subRange.start(); i < subRange.end(); i++) {
        if (fileContents[toIntBytes(i)] == UNAVAILABLE) {
            pending = true;
        }
    }
    if (pending == false) {
        // Entire sub-range already available: both wait methods must invoke the listener immediately.
        final AtomicBoolean wasNotified = new AtomicBoolean();
        final ActionListener<Void> listener = ActionTestUtils.assertNoFailureListener(ignored -> assertTrue(wasNotified.compareAndSet(false, true)));
        final List<SparseFileTracker.Gap> gaps = sparseFileTracker.waitForRange(range, subRange, listener);
        assertTrue("All bytes of the sub range " + subRange + " are available, listener must be executed immediately", wasNotified.get());
        wasNotified.set(false);
        assertTrue(sparseFileTracker.waitForRangeIfPending(subRange, listener));
        assertTrue(wasNotified.get());
        // Any gaps returned lie within the outer range; fill them in so the tracker state stays consistent.
        for (final SparseFileTracker.Gap gap : gaps) {
            assertThat(gap.start(), greaterThanOrEqualTo(range.start()));
            assertThat(gap.end(), lessThanOrEqualTo(range.end()));
            for (long i = gap.start(); i < gap.end(); i++) {
                assertThat(fileContents[toIntBytes(i)], equalTo(UNAVAILABLE));
                fileContents[toIntBytes(i)] = AVAILABLE;
                gap.onProgress(i + 1L);
            }
            gap.onCompletion();
        }
    } else {
        // Some bytes of the sub-range are missing: listeners must fire only once those bytes become available.
        final AtomicBoolean waitIfPendingWasNotified = new AtomicBoolean();
        final ActionListener<Void> waitIfPendingListener = ActionTestUtils.assertNoFailureListener(ignored -> assertTrue(waitIfPendingWasNotified.compareAndSet(false, true)));
        // waitForRangeIfPending returns false while no waitForRange call has registered the range yet.
        assertFalse(sparseFileTracker.waitForRangeIfPending(subRange, waitIfPendingListener));
        final AtomicBoolean wasNotified = new AtomicBoolean();
        final AtomicBoolean expectNotification = new AtomicBoolean();
        final List<SparseFileTracker.Gap> gaps = sparseFileTracker.waitForRange(range, subRange, ActionTestUtils.assertNoFailureListener(ignored -> {
            assertTrue(expectNotification.get());
            assertTrue(wasNotified.compareAndSet(false, true));
        }));
        assertFalse("Listener should not have been executed yet", wasNotified.get());
        // Once the range is registered, waitForRangeIfPending accepts the listener but does not fire it yet.
        assertTrue(sparseFileTracker.waitForRangeIfPending(subRange, waitIfPendingListener));
        assertFalse(waitIfPendingWasNotified.get());
        // triggeringProgress ends up at the LAST unavailable byte of the sub-range: listeners may only fire
        // after progress passes this position.
        long triggeringProgress = -1L;
        for (long i = subRange.start(); i < subRange.end(); i++) {
            if (fileContents[toIntBytes(i)] == UNAVAILABLE) {
                triggeringProgress = i;
            }
        }
        assertThat(triggeringProgress, greaterThanOrEqualTo(0L));
        for (final SparseFileTracker.Gap gap : gaps) {
            assertThat(gap.start(), greaterThanOrEqualTo(range.start()));
            assertThat(gap.end(), lessThanOrEqualTo(range.end()));
            for (long i = gap.start(); i < gap.end(); i++) {
                assertThat(fileContents[toIntBytes(i)], equalTo(UNAVAILABLE));
                fileContents[toIntBytes(i)] = AVAILABLE;
                if (triggeringProgress == i) {
                    assertFalse(expectNotification.getAndSet(true));
                }
                // Before and after each onProgress call, both listeners' state must track whether
                // progress has passed the triggering position.
                assertThat("Listener should not have been called before [" + triggeringProgress + "] is reached, but it was triggered after progress got updated to [" + i + ']', wasNotified.get() && waitIfPendingWasNotified.get(), equalTo(triggeringProgress < i));
                gap.onProgress(i + 1L);
                assertThat("Listener should not have been called before [" + triggeringProgress + "] is reached, but it was triggered after progress got updated to [" + i + ']', wasNotified.get() && waitIfPendingWasNotified.get(), equalTo(triggeringProgress < i + 1L));
            }
            gap.onCompletion();
            assertThat("Listener should not have been called before [" + triggeringProgress + "] is reached, but it was triggered once gap [" + gap + "] was completed", wasNotified.get(), equalTo(triggeringProgress < gap.end()));
            assertThat(waitIfPendingWasNotified.get(), equalTo(triggeringProgress < gap.end()));
        }
        assertTrue(wasNotified.get());
        assertTrue(waitIfPendingWasNotified.get());
    }
    // In either case the sub-range is now fully available, so a fresh waitForRange returns no gaps
    // and fires its listener immediately.
    final AtomicBoolean wasNotified = new AtomicBoolean();
    final List<SparseFileTracker.Gap> gaps = sparseFileTracker.waitForRange(range, subRange, ActionTestUtils.assertNoFailureListener(ignored -> assertTrue(wasNotified.compareAndSet(false, true))));
    assertThat(gaps, empty());
    assertTrue(wasNotified.get());
}
272088.2512158elasticsearch
/**
 * Drives an {@code AutoFollower} through a scripted sequence of local cluster-state updates that create two
 * auto-follow patterns ("patternLogs", then "patternDocs") and subsequently pause both, while new leader
 * indices keep appearing on the remote cluster. Verifies the metadata versions the auto-follower requests
 * from the remote cluster at each step, the set of leader indices it follows, and the followed-index UUIDs
 * recorded in the final {@code AutoFollowMetadata}.
 */
public void testAutoFollowerWithPausedActivePatterns() {
    final String remoteCluster = randomAlphaOfLength(5);
    final AtomicReference<ClusterState> remoteClusterState = new AtomicReference<>(createRemoteClusterState("patternLogs-0", true, randomLongBetween(1L, 1_000L)));
    final AtomicReference<ClusterState> localClusterState = new AtomicReference<>(ClusterState.builder(new ClusterName("local")).metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(emptyMap(), emptyMap(), emptyMap()))).build());
    // Each call to the supplier advances the local cluster-state version and applies one scripted mutation.
    final Supplier<ClusterState> localClusterStateSupplier = () -> localClusterState.updateAndGet(currentLocalState -> {
        final int nextClusterStateVersion = (int) (currentLocalState.version() + 1);
        final ClusterState nextLocalClusterState;
        if (nextClusterStateVersion == 1) {
            // v1: create the "patternLogs" auto-follow pattern.
            PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
            request.setName("patternLogs");
            request.setRemoteCluster(remoteCluster);
            request.setLeaderIndexPatterns(singletonList("patternLogs-*"));
            request.setFollowIndexNamePattern("copy-{{leader_index}}");
            nextLocalClusterState = TransportPutAutoFollowPatternAction.innerPut(request, emptyMap(), currentLocalState, remoteClusterState.get());
        } else if (nextClusterStateVersion == 2) {
            // v2: no change (only the version is bumped).
            nextLocalClusterState = currentLocalState;
        } else if (nextClusterStateVersion == 3) {
            // v3: create the "patternDocs" auto-follow pattern.
            PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
            request.setName("patternDocs");
            request.setRemoteCluster(remoteCluster);
            request.setLeaderIndexPatterns(singletonList("patternDocs-*"));
            request.setFollowIndexNamePattern("copy-{{leader_index}}");
            nextLocalClusterState = TransportPutAutoFollowPatternAction.innerPut(request, emptyMap(), currentLocalState, remoteClusterState.get());
        } else if (nextClusterStateVersion == 4) {
            // v4: no change.
            nextLocalClusterState = currentLocalState;
        } else if (nextClusterStateVersion == 5) {
            // v5: pause "patternLogs" (active == false).
            ActivateAutoFollowPatternAction.Request request = new ActivateAutoFollowPatternAction.Request("patternLogs", false);
            nextLocalClusterState = TransportActivateAutoFollowPatternAction.innerActivate(request, currentLocalState);
        } else if (nextClusterStateVersion == 6) {
            // v6: pause "patternDocs" — after this, no active patterns remain.
            ActivateAutoFollowPatternAction.Request request = new ActivateAutoFollowPatternAction.Request("patternDocs", false);
            nextLocalClusterState = TransportActivateAutoFollowPatternAction.innerActivate(request, currentLocalState);
        } else {
            // Past v6: return the state unchanged without bumping the version.
            return currentLocalState;
        }
        return ClusterState.builder(nextLocalClusterState).version(nextClusterStateVersion).build();
    });
    final Set<String> followedIndices = ConcurrentCollections.newConcurrentSet();
    final List<AutoFollowCoordinator.AutoFollowResult> autoFollowResults = new ArrayList<>();
    final AutoFollower autoFollower = new AutoFollower(remoteCluster, autoFollowResults::addAll, localClusterStateSupplier, () -> 1L, Runnable::run) {

        // Number of the current remote cluster-state fetch (1-based).
        int countFetches = 1;

        ClusterState lastFetchedRemoteClusterState;

        @Override
        void getRemoteClusterState(String remote, long metadataVersion, BiConsumer<ClusterStateResponse, Exception> handler) {
            assertThat(remote, equalTo(remoteCluster));
            // On every fetch two new leader indices appear on the remote cluster.
            final String[] newLeaderIndices = { "patternLogs-" + countFetches, "patternDocs-" + countFetches };
            if (countFetches == 1) {
                assertThat("first invocation, it should retrieve the metadata version 1", metadataVersion, equalTo(1L));
                lastFetchedRemoteClusterState = createRemoteClusterState(remoteClusterState.get(), newLeaderIndices);
            } else if (countFetches == 2 || countFetches == 4) {
                // Fetches after an unchanged-pattern step: the auto-follower waits for the next metadata version.
                assertThat("no patterns changes, it should retrieve the last known metadata version + 1", metadataVersion, equalTo(lastFetchedRemoteClusterState.metadata().version() + 1));
                lastFetchedRemoteClusterState = createRemoteClusterState(remoteClusterState.get(), newLeaderIndices);
                assertThat("remote cluster state metadata version is aligned with what the auto-follower is requesting", lastFetchedRemoteClusterState.getMetadata().version(), equalTo(metadataVersion));
            } else if (countFetches == 3 || countFetches == 5) {
                // Fetches after a pattern change: the auto-follower re-requests the last known metadata version.
                assertThat("patterns have changed, it should retrieve the last known metadata version again", metadataVersion, equalTo(lastFetchedRemoteClusterState.metadata().version()));
                lastFetchedRemoteClusterState = createRemoteClusterState(remoteClusterState.get(), newLeaderIndices);
                assertThat("remote cluster state metadata version is incremented", lastFetchedRemoteClusterState.getMetadata().version(), equalTo(metadataVersion + 1));
            } else {
                fail("after the 5th invocation there are no more active patterns, the auto-follower should have stopped");
            }
            countFetches = countFetches + 1;
            remoteClusterState.set(lastFetchedRemoteClusterState);
            handler.accept(new ClusterStateResponse(lastFetchedRemoteClusterState.getClusterName(), lastFetchedRemoteClusterState, false), null);
        }

        @Override
        void createAndFollow(Map<String, String> headers, PutFollowAction.Request request, Runnable successHandler, Consumer<Exception> failureHandler) {
            // Record each leader index the auto-follower decides to follow and report immediate success.
            assertThat(request.getRemoteCluster(), equalTo(remoteCluster));
            assertThat(request.masterNodeTimeout(), equalTo(TimeValue.MAX_VALUE));
            assertThat(request.getFollowerIndex(), startsWith("copy-"));
            followedIndices.add(request.getLeaderIndex());
            successHandler.run();
        }

        @Override
        void updateAutoFollowMetadata(Function<ClusterState, ClusterState> updateFunction, Consumer<Exception> handler) {
            // Apply metadata updates directly to the local cluster state.
            localClusterState.updateAndGet(updateFunction::apply);
            handler.accept(null);
        }

        @Override
        void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List<String> patterns) {
        }
    };
    autoFollower.start();
    assertThat(autoFollowResults.size(), equalTo(7));
    // patternLogs is followed from fetch 1 until it is paused; patternDocs from its creation (fetch 3) onwards.
    assertThat(followedIndices, containsInAnyOrder("patternLogs-1", "patternLogs-2", "patternLogs-3", "patternDocs-3", "patternLogs-4", "patternDocs-4", "patternDocs-5"));
    final ClusterState finalRemoteClusterState = remoteClusterState.get();
    final ClusterState finalLocalClusterState = localClusterState.get();
    AutoFollowMetadata autoFollowMetadata = finalLocalClusterState.metadata().custom(AutoFollowMetadata.TYPE);
    assertThat(autoFollowMetadata.getPatterns().size(), equalTo(2));
    // Both patterns end up paused.
    assertThat(autoFollowMetadata.getPatterns().values().stream().noneMatch(AutoFollowPattern::isActive), is(true));
    assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("patternLogs"), containsInAnyOrder(finalRemoteClusterState.metadata().index("patternLogs-0").getIndexUUID(), finalRemoteClusterState.metadata().index("patternLogs-1").getIndexUUID(), finalRemoteClusterState.metadata().index("patternLogs-2").getIndexUUID(), finalRemoteClusterState.metadata().index("patternLogs-3").getIndexUUID(), finalRemoteClusterState.metadata().index("patternLogs-4").getIndexUUID()));
    assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("patternDocs"), containsInAnyOrder(finalRemoteClusterState.metadata().index("patternDocs-1").getIndexUUID(), finalRemoteClusterState.metadata().index("patternDocs-2").getIndexUUID(), finalRemoteClusterState.metadata().index("patternDocs-3").getIndexUUID(), finalRemoteClusterState.metadata().index("patternDocs-4").getIndexUUID(), finalRemoteClusterState.metadata().index("patternDocs-5").getIndexUUID()));
}
272020.622137elasticsearch
/**
 * Decides whether the given SLM-managed snapshot may be deleted under this retention configuration.
 * The retention predicates are evaluated in order:
 * <ol>
 *   <li>with no expire-after configured, an unsuccessful snapshot that has a strictly newer successful
 *       sibling is eligible;</li>
 *   <li>if a maximum snapshot count is configured and exceeded, the oldest surplus snapshots (by start
 *       time) are eligible;</li>
 *   <li>if a minimum snapshot count is configured and not exceeded, a successful snapshot is ineligible
 *       (unsuccessful snapshots do not count towards the minimum and skip this check);</li>
 *   <li>if an expire-after is configured, the snapshot's age decides, subject to keeping at least the
 *       configured minimum number of successful snapshots;</li>
 *   <li>otherwise the snapshot is ineligible.</li>
 * </ol>
 *
 * @param snapshotId      id of the snapshot under consideration
 * @param snapshotDetails details (state, start time, SLM policy) of that snapshot
 * @param allSnapshots    all snapshots taken by the same SLM policy, keyed by id (must include this one)
 * @return {@code true} if the snapshot is eligible for deletion
 */
public boolean isSnapshotEligibleForDeletion(SnapshotId snapshotId, RepositoryData.SnapshotDetails snapshotDetails, Map<SnapshotId, RepositoryData.SnapshotDetails> allSnapshots) {
    assert Strings.hasText(snapshotDetails.getSlmPolicy());
    final var snapshotState = snapshotDetails.getSnapshotState();
    final var startTimeMillis = snapshotDetails.getStartTimeMillis();
    final var snapshotName = snapshotId.getName();
    final int totalSnapshotCount = allSnapshots.size();
    // All snapshots of this policy ordered oldest-first by start time.
    final var sortedSnapshots = allSnapshots.entrySet().stream().sorted(Comparator.comparingLong(e -> e.getValue().getStartTimeMillis())).toList();
    int successCount = 0;
    long latestSuccessfulTimestamp = Long.MIN_VALUE;
    for (final var snapshot : allSnapshots.values()) {
        assert Objects.equals(snapshot.getSlmPolicy(), snapshotDetails.getSlmPolicy());
        if (snapshot.getSnapshotState() == SnapshotState.SUCCESS) {
            successCount++;
            latestSuccessfulTimestamp = Math.max(latestSuccessfulTimestamp, snapshot.getStartTimeMillis());
        }
    }
    final long newestSuccessfulTimestamp = latestSuccessfulTimestamp;
    final int successfulSnapshotCount = successCount;
    // 1. No expire-after: an unsuccessful snapshot superseded by a newer successful one can go.
    if (this.expireAfter == null && UNSUCCESSFUL_STATES.contains(snapshotState) && newestSuccessfulTimestamp > startTimeMillis) {
        logger.trace("[{}]: ELIGIBLE as it is {} and there is a more recent successful snapshot", snapshotName, snapshotState);
        return true;
    }
    // 2. Maximum-count check: more successful snapshots than allowed.
    if (this.maximumSnapshotCount != null && successfulSnapshotCount > this.maximumSnapshotCount) {
        final long successfulSnapsToDelete = successfulSnapshotCount - this.maximumSnapshotCount;
        boolean found = false;
        int successfulSeen = 0;
        // Walk oldest-first; stop once we've passed the surplus of successful snapshots. This snapshot is
        // eligible if it is encountered before that point.
        // NOTE(review): the id check is not guarded by snapshot state, so an unsuccessful snapshot
        // interleaved among the oldest also matches here — confirm this is intended.
        for (final var s : sortedSnapshots) {
            if (s.getValue().getSnapshotState() == SnapshotState.SUCCESS) {
                successfulSeen++;
            }
            if (successfulSeen > successfulSnapsToDelete) {
                break;
            }
            if (s.getKey().equals(snapshotId)) {
                found = true;
                break;
            }
        }
        if (found) {
            logger.trace("[{}]: ELIGIBLE as it is one of the {} oldest snapshots with " + "{} non-failed snapshots ({} total), over the limit of {} maximum snapshots", snapshotName, successfulSnapsToDelete, successfulSnapshotCount, totalSnapshotCount, this.maximumSnapshotCount);
            return true;
        } else {
            logger.trace("[{}]: SKIPPING as it is not one of the {} oldest snapshots with " + "{} non-failed snapshots ({} total), over the limit of {} maximum snapshots", snapshotName, successfulSnapsToDelete, successfulSnapshotCount, totalSnapshotCount, this.maximumSnapshotCount);
        }
    }
    // 3. Minimum-count check: keep successful snapshots while at or below the configured minimum.
    if (this.minimumSnapshotCount != null && successfulSnapshotCount <= this.minimumSnapshotCount) {
        if (UNSUCCESSFUL_STATES.contains(snapshotState) == false) {
            logger.trace("[{}]: INELIGIBLE as there are {} non-failed snapshots ({} total) and {} minimum snapshots needed", snapshotName, successfulSnapshotCount, totalSnapshotCount, this.minimumSnapshotCount);
            return false;
        } else {
            logger.trace("[{}]: SKIPPING minimum snapshot count check as this snapshot is {} and not counted " + "towards the minimum snapshot count.", snapshotName, snapshotState);
        }
    }
    // 4. Age-based expiry.
    if (this.expireAfter != null) {
        if (this.minimumSnapshotCount != null) {
            // Expiry must never drop below the minimum number of successful snapshots: a successful snapshot
            // is only maybe-eligible when it is among the oldest successful ones beyond the minimum.
            final boolean maybeEligible;
            if (snapshotState == SnapshotState.SUCCESS) {
                maybeEligible = sortedSnapshots.stream().filter(snap -> SnapshotState.SUCCESS.equals(snap.getValue().getSnapshotState())).limit(Math.max(0, successfulSnapshotCount - minimumSnapshotCount)).anyMatch(s -> s.getKey().equals(snapshotId));
            } else if (UNSUCCESSFUL_STATES.contains(snapshotState)) {
                // Unsuccessful snapshots never count towards the minimum, so they are always candidates.
                maybeEligible = allSnapshots.containsKey(snapshotId);
            } else {
                // Any other state (e.g. still in progress) is never expired here.
                logger.trace("[{}] INELIGIBLE because snapshot is in state [{}]", snapshotName, snapshotState);
                return false;
            }
            if (maybeEligible == false) {
                logger.trace("[{}]: INELIGIBLE as snapshot expiration would pass the " + "minimum number of configured snapshots ({}) to keep, regardless of age", snapshotName, this.minimumSnapshotCount);
                return false;
            }
        }
        final long snapshotAge = nowSupplier.getAsLong() - startTimeMillis;
        if (snapshotAge > this.expireAfter.getMillis()) {
            logger.trace(() -> format("[%s]: ELIGIBLE as snapshot age of %s is older than %s", snapshotName, new TimeValue(snapshotAge).toHumanReadableString(3), this.expireAfter.toHumanReadableString(3)));
            return true;
        } else {
            logger.trace(() -> format("[%s]: INELIGIBLE as snapshot age of [%sms] is newer than %s", snapshotName, new TimeValue(snapshotAge).toHumanReadableString(3), this.expireAfter.toHumanReadableString(3)));
            return false;
        }
    }
    // 5. No retention predicate matched: keep the snapshot.
    logger.trace("[{}]: INELIGIBLE as no retention predicates matched", snapshotName);
    return false;
}
275791.31134elasticsearch
/**
 * Returns the named-writeable registrations contributed by the X-Pack modules: feature-set usages
 * (graph, logstash, ML, monitoring, security, EQL/ESQL/SQL, watcher, rollup, transform, etc.),
 * security metadata (token metadata, role mappings, cluster privileges), ILM/SLM lifecycle types and
 * actions, CCR auto-follow metadata, and transform/rollup task params and state.
 *
 * @return an unmodifiable list of {@link NamedWriteableRegistry.Entry} instances
 */
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
    // NOTE(review): every element below is a `new` expression, so the trailing filter(Objects::nonNull)
    // looks like a defensive no-op — confirm before simplifying.
    return Stream.of(new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.LOGSTASH, LogstashFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING, MachineLearningFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INFERENCE, InferenceFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MONITORING, MonitoringFeatureSetUsage::new), new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetadata.TYPE, TokenMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TokenMetadata.TYPE, TokenMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), new NamedWriteableRegistry.Entry(ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.ManageApplicationPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.ManageApplicationPrivileges::createFrom), new NamedWriteableRegistry.Entry(ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom), new NamedWriteableRegistry.Entry(Metadata.Custom.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AllExpression.NAME, AllExpression::new), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AnyExpression.NAME, AnyExpression::new), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, FieldExpression.NAME, FieldExpression::new), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, ExceptExpression.NAME, ExceptExpression::new), new 
NamedWriteableRegistry.Entry(RemoteClusterPermissions.class, RemoteClusterPermissions.NAME, RemoteClusterPermissions::new), new NamedWriteableRegistry.Entry(RemoteClusterPermissionGroup.class, RemoteClusterPermissionGroup.NAME, RemoteClusterPermissionGroup::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.EQL, EqlFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ESQL, EsqlFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SQL, SqlFeatureSetUsage::new), new NamedWriteableRegistry.Entry(Metadata.Custom.class, WatcherMetadata.TYPE, WatcherMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, WatcherMetadata.TYPE, WatcherMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.WATCHER, WatcherFeatureSetUsage::new), new NamedWriteableRegistry.Entry(Metadata.Custom.class, LicensesMetadata.TYPE, LicensesMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, LicensesMetadata.TYPE, LicensesMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new), new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new), new NamedWriteableRegistry.Entry(Task.Status.class, DownsampleShardStatus.NAME, DownsampleShardStatus::new), new NamedWriteableRegistry.Entry(AutoFollowMetadata.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry(Metadata.Custom.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, AutoFollowMetadata.TYPE, in -> AutoFollowMetadata.readDiffFrom(Metadata.Custom.class, AutoFollowMetadata.TYPE, 
in)), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE, IndexLifecycleFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SNAPSHOT_LIFECYCLE, SLMFeatureSetUsage::new), new NamedWriteableRegistry.Entry(Metadata.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.IndexLifecycleMetadataDiff::new), new NamedWriteableRegistry.Entry(Metadata.Custom.class, LifecycleOperationMetadata.TYPE, LifecycleOperationMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, LifecycleOperationMetadata.TYPE, LifecycleOperationMetadata.LifecycleOperationMetadataDiff::new), new NamedWriteableRegistry.Entry(Metadata.Custom.class, SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.SnapshotLifecycleMetadataDiff::new), new NamedWriteableRegistry.Entry(LifecycleType.class, TimeseriesLifecycleType.TYPE, (in) -> TimeseriesLifecycleType.INSTANCE), new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, RolloverAction.NAME, RolloverAction::read), new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::readFrom), new NamedWriteableRegistry.Entry(LifecycleAction.class, FreezeAction.NAME, in -> FreezeAction.INSTANCE), new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new), new 
NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, in -> UnfollowAction.INSTANCE), new NamedWriteableRegistry.Entry(LifecycleAction.class, WaitForSnapshotAction.NAME, WaitForSnapshotAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, MigrateAction.NAME, MigrateAction::readFrom), new NamedWriteableRegistry.Entry(Metadata.Custom.class, TransformMetadata.TYPE, TransformMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TransformMetadata.TYPE, TransformMetadata.TransformMetadataDiff::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TransformField.TASK_NAME, TransformTaskParams::new), new NamedWriteableRegistry.Entry(Task.Status.class, TransformField.TASK_NAME, TransformState::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, TransformField.TASK_NAME, TransformState::new), new NamedWriteableRegistry.Entry(SyncConfig.class, TransformField.TIME.getPreferredName(), TimeSyncConfig::new), new NamedWriteableRegistry.Entry(RetentionPolicyConfig.class, TransformField.TIME.getPreferredName(), TimeRetentionPolicyConfig::new), new NamedWriteableRegistry.Entry(RetentionPolicyConfig.class, NullRetentionPolicyConfig.NAME.getPreferredName(), i -> NullRetentionPolicyConfig.INSTANCE), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.VOTING_ONLY, VotingOnlyNodeFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.FROZEN_INDICES, FrozenIndicesFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new), new 
NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.AGGREGATE_METRIC, AggregateMetricFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSetUsage::new), new NamedWriteableRegistry.Entry(Task.Status.class, ExecuteEnrichPolicyStatus.NAME, ExecuteEnrichPolicyStatus::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SEARCHABLE_SNAPSHOTS, SearchableSnapshotFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_STREAM_LIFECYCLE, DataStreamLifecycleFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ARCHIVE, ArchiveFeatureSetUsage::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, DownsampleAction.NAME, DownsampleAction::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.HEALTH_API, HealthApiFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.REMOTE_CLUSTERS, RemoteClusterFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENTERPRISE_SEARCH, EnterpriseSearchFeatureSetUsage::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.UNIVERSAL_PROFILING, ProfilingUsage::new)).filter(Objects::nonNull).toList();
}
271481.711179elasticsearch
/**
 * Takes a source-only snapshot of a shard populated with random indexing/deleting activity, restores it
 * into a new shard, and verifies: the snapshot is a full (non-incremental) copy, the restore completes
 * with zero recovered translog operations, doc counts match the original shard, direct gets and term
 * queries are rejected (source-only restriction), seq-no ordering is intact, and re-indexing the restored
 * {@code _source} reproduces every original document field-for-field.
 */
public void testRestoreMinimal() throws IOException {
    IndexShard shard = newStartedShard(true);
    // Index an initial batch of docs with random refreshes...
    int numInitialDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numInitialDocs; i++) {
        final String id = Integer.toString(i);
        indexDoc(shard, id, randomDoc());
        if (randomBoolean()) {
            shard.refresh("test");
        }
    }
    // ...then randomly update or (rarely) delete some of them.
    for (int i = 0; i < numInitialDocs; i++) {
        final String id = Integer.toString(i);
        if (randomBoolean()) {
            if (rarely()) {
                deleteDoc(shard, id);
            } else {
                indexDoc(shard, id, randomDoc());
            }
        }
        if (frequently()) {
            shard.refresh("test");
        }
    }
    SnapshotId snapshotId = new SnapshotId("test", "test");
    IndexId indexId = new IndexId(shard.shardId().getIndexName(), shard.shardId().getIndex().getUUID());
    SourceOnlySnapshotRepository repository = new SourceOnlySnapshotRepository(createRepository());
    repository.start();
    // Snapshot the last commit and finalize the snapshot in the repository.
    try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) {
        IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(null);
        final PlainActionFuture<ShardSnapshotResult> future = new PlainActionFuture<>();
        runAsSnapshot(shard.getThreadPool(), () -> {
            repository.snapshotShard(new SnapshotShardContext(shard.store(), shard.mapperService(), snapshotId, indexId, new SnapshotIndexCommit(snapshotRef), null, indexShardSnapshotStatus, IndexVersion.current(), randomMillisUpToYear9999(), future));
            future.actionGet();
            final PlainActionFuture<SnapshotInfo> finFuture = new PlainActionFuture<>();
            final ShardGenerations shardGenerations = ShardGenerations.builder().put(indexId, 0, indexShardSnapshotStatus.generation()).build();
            repository.finalizeSnapshot(new FinalizeSnapshotContext(shardGenerations, ESBlobStoreRepositoryIntegTestCase.getRepositoryData(repository).getGenId(), Metadata.builder().put(shard.indexSettings().getIndexMetadata(), false).build(), new SnapshotInfo(new Snapshot(repository.getMetadata().name(), snapshotId), shardGenerations.indices().stream().map(IndexId::getName).collect(Collectors.toList()), Collections.emptyList(), Collections.emptyList(), null, 1L, shardGenerations.totalShards(), Collections.emptyList(), true, Collections.emptyMap(), 0L, Collections.emptyMap()), IndexVersion.current(), new ActionListener<>() {

                @Override
                public void onResponse(RepositoryData repositoryData) {
                }

                @Override
                public void onFailure(Exception e) {
                    finFuture.onFailure(e);
                }
            }, finFuture::onResponse));
            finFuture.actionGet();
        });
        IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy();
        // First snapshot of this shard: every file is incremental, and the snapshot stage ends as DONE.
        assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount());
        assertEquals(copy.getStage(), IndexShardSnapshotStatus.Stage.DONE);
    }
    shard.refresh("test");
    // Build a new shard that recovers from the snapshot just taken.
    ShardRouting shardRouting = shardRoutingBuilder(new ShardId("index", "_na_", 0), randomAlphaOfLength(10), true, ShardRoutingState.INITIALIZING).withRecoverySource(new RecoverySource.SnapshotRecoverySource(UUIDs.randomBase64UUID(), new Snapshot("src_only", snapshotId), IndexVersion.current(), indexId)).build();
    IndexMetadata metadata = runAsSnapshot(threadPool, () -> repository.getSnapshotIndexMetaData(PlainActionFuture.get(listener -> repository.getRepositoryData(EsExecutors.DIRECT_EXECUTOR_SERVICE, listener)), snapshotId, indexId));
    IndexShard restoredShard = newShard(shardRouting, metadata, null, SourceOnlySnapshotRepository.getEngineFactory(), NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY);
    DiscoveryNode discoveryNode = DiscoveryNodeUtils.create("node_g");
    restoredShard.markAsRecovering("test from snap", new RecoveryState(restoredShard.routingEntry(), discoveryNode, null));
    runAsSnapshot(shard.getThreadPool(), () -> {
        final PlainActionFuture<Boolean> future = new PlainActionFuture<>();
        restoredShard.restoreFromRepository(repository, future);
        assertTrue(future.actionGet());
    });
    assertEquals(restoredShard.recoveryState().getStage(), RecoveryState.Stage.DONE);
    // Source-only restore replays no translog operations.
    assertEquals(restoredShard.recoveryState().getTranslog().recoveredOperations(), 0);
    assertEquals(IndexShardState.POST_RECOVERY, restoredShard.state());
    restoredShard.refresh("test");
    assertEquals(restoredShard.docStats().getCount(), shard.docStats().getCount());
    // Direct gets are rejected on a source-only index.
    EngineException engineException = expectThrows(EngineException.class, () -> restoredShard.get(new Engine.Get(false, false, Integer.toString(0))));
    assertEquals(engineException.getCause().getMessage(), "_source only indices can't be searched or filtered");
    SeqNoStats seqNoStats = restoredShard.seqNoStats();
    assertEquals(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint());
    final IndexShard targetShard;
    try (Engine.Searcher searcher = restoredShard.acquireSearcher("test")) {
        assertEquals(searcher.getIndexReader().maxDoc(), seqNoStats.getLocalCheckpoint());
        // match_all works; seq_no sort must return strictly increasing values.
        TopDocs search = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE);
        assertEquals(searcher.getIndexReader().numDocs(), search.totalHits.value);
        search = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE, new Sort(new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG)), false);
        assertEquals(searcher.getIndexReader().numDocs(), search.totalHits.value);
        long previous = -1;
        for (ScoreDoc doc : search.scoreDocs) {
            FieldDoc fieldDoc = (FieldDoc) doc;
            assertEquals(1, fieldDoc.fields.length);
            long current = (Long) fieldDoc.fields[0];
            assertThat(previous, Matchers.lessThan(current));
            previous = current;
        }
        // Term queries against regular fields are unsupported on a source-only index.
        expectThrows(UnsupportedOperationException.class, () -> searcher.search(new TermQuery(new Term("boom", "boom")), 1));
        // Re-index the restored _source into a fresh shard to compare contents with the original.
        targetShard = reindex(searcher.getDirectoryReader(), new MappingMetadata("_doc", restoredShard.mapperService().documentMapper().mapping().getMeta()));
    }
    // Every original doc must exist (or not) identically in the re-indexed shard, with equal field values.
    for (int i = 0; i < numInitialDocs; i++) {
        Engine.Get get = new Engine.Get(false, false, Integer.toString(i));
        Engine.GetResult original = shard.get(get);
        Engine.GetResult restored = targetShard.get(get);
        assertEquals(original.exists(), restored.exists());
        if (original.exists()) {
            Document document = original.docIdAndVersion().reader.document(original.docIdAndVersion().docId);
            Document restoredDocument = restored.docIdAndVersion().reader.document(restored.docIdAndVersion().docId);
            for (IndexableField field : document) {
                assertEquals(document.get(field.name()), restoredDocument.get(field.name()));
            }
        }
        IOUtils.close(original, restored);
    }
    closeShards(shard, restoredShard, targetShard);
}
272766.0411143elasticsearch
/**
 * Exercises the EQL REST usage stats: runs a random number of queries of several shapes
 * (plain events, sequences with varying query counts, join keys, maxspan, pipes) and
 * verifies after each batch that exactly the expected feature metrics moved and that the
 * cumulative total-query counter is correct. Finishes with a batch of intentionally
 * invalid queries and checks the failed-query counters.
 */
public void testEqlRestUsage() throws IOException {
    // Load the dataset lazily: only when the test index is missing.
    if (client().performRequest(new Request("HEAD", "/" + DataLoader.TEST_INDEX)).getStatusLine().getStatusCode() == 404) {
        DataLoader.loadDatasetIntoEs(client(), this::createParser);
    }
    String defaultPipe = "pipe_tail";
    int allTotalQueries = baseAllTotalQueries;

    // event query with an explicit head pipe
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "process where serial_event_id < 4 | head 3",
        Set.of("pipe_head", "event"),
        allTotalQueries);

    // two-query sequence, implicit (default) tail pipe
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "sequence [process where serial_event_id == 1] [process where serial_event_id == 2]",
        Set.of("sequence", "sequence_queries_two", defaultPipe),
        allTotalQueries);

    // event query with an explicit tail pipe
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "process where serial_event_id < 4 | tail 2",
        Set.of("pipe_tail", "event"),
        allTotalQueries);

    // four-query sequence with maxspan, until, one join key and both pipes
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "sequence with maxspan=1d" + "  [process where serial_event_id < 4] by exit_code" + "  [process where opcode == 1] by pid" + "  [process where opcode == 2] by pid" + "  [file where parent_process_name == \\\"file_delete_event\\\"] by exit_code" + " until [process where opcode==1] by ppid" + " | head 4" + " | tail 2",
        Set.of("sequence", "sequence_maxspan", "sequence_queries_four", "pipe_head", "pipe_tail", "join_keys_one", "sequence_until"),
        allTotalQueries);

    // three-query sequence with maxspan and one join key
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "sequence with maxspan=1d" + "  [process where serial_event_id < 4] by user" + "  [process where opcode == 1] by user" + "  [process where opcode == 2] by user",
        Set.of("sequence", "sequence_queries_three", "join_keys_one", "sequence_maxspan", defaultPipe),
        allTotalQueries);

    // five-query sequence with three join keys, maxspan and a tail pipe
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "sequence by user, ppid, exit_code with maxspan=1m" + "  [process where serial_event_id < 4]" + "  [process where opcode == 1]" + "  [file where parent_process_name == \\\"file_delete_event\\\"]" + "  [process where serial_event_id < 4]" + "  [process where opcode == 1]" + "| tail 4",
        Set.of("sequence", "sequence_queries_five_or_more", "pipe_tail", "join_keys_three", "sequence_maxspan"),
        allTotalQueries);

    // two-query sequence with four join keys
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "sequence by exit_code, user, serial_event_id, pid" + "  [process where serial_event_id < 4]" + "  [process where opcode == 1]",
        Set.of("sequence", "sequence_queries_two", "join_keys_four", defaultPipe),
        allTotalQueries);

    // two-query sequence with five-or-more join keys
    allTotalQueries = runQueriesAndAssertFeatureMetrics(
        "sequence by exit_code, user, serial_event_id, pid, ppid" + "  [process where serial_event_id < 4]" + "  [process where opcode == 1]",
        Set.of("sequence", "sequence_queries_two", "join_keys_five_or_more", defaultPipe),
        allTotalQueries);

    // invalid queries: each attempt must throw, and only the failure counters move
    int randomFailedExecutions = randomIntBetween(1, 15);
    int allFailedQueries = baseAllFailedQueries + randomFailedExecutions;
    allTotalQueries += randomFailedExecutions;
    for (int i = 0; i < randomFailedExecutions; i++) {
        expectThrows(Exception.class, () -> {
            runEql(randomFrom("process where missing_field < 4 | tail 2", "sequence abc [process where serial_event_id == 1]", "sequence with maxspan=1x [process where serial_event_id == 1]", "sequence by exit_code, user [process where serial_event_id < 4] by ppid", "sequence by"));
        });
    }
    Map<String, Object> responseAsMap = getStats();
    assertAllFailedQueryMetrics(allFailedQueries, responseAsMap);
    assertAllQueryMetrics(allTotalQueries, responseAsMap);
}

/**
 * Runs {@code query} a random number of times (1-15) and asserts that exactly
 * {@code metricsToCheck} were incremented by that amount, that no other feature metric
 * moved, and that the total query counter reflects the new executions.
 *
 * @param query             the EQL query to execute repeatedly
 * @param metricsToCheck    the feature metrics expected to increase
 * @param totalQueriesSoFar cumulative total query count before this batch
 * @return the cumulative total query count after this batch
 */
private int runQueriesAndAssertFeatureMetrics(String query, Set<String> metricsToCheck, int totalQueriesSoFar) throws IOException {
    int executions = randomIntBetween(1, 15);
    for (int i = 0; i < executions; i++) {
        runEql(query);
    }
    int allTotalQueries = totalQueriesSoFar + executions;
    Map<String, Object> responseAsMap = getStats();
    assertFeaturesMetrics(executions, responseAsMap, metricsToCheck);
    assertFeaturesMetricsExcept(responseAsMap, metricsToCheck);
    assertAllQueryMetrics(allTotalQueries, responseAsMap);
    return allTotalQueries;
}
274508.829130elasticsearch
/**
 * Adds MvSlice test cases for every BytesRef-backed data type: keyword, text, ip,
 * version, and the four spatial types. Each case slices a random multi-value field
 * with random in-range start/end offsets and expects either a single element
 * (start == end) or the corresponding sub-list.
 */
private static void bytesRefs(List<TestCaseSupplier> suppliers) {
    // Every BytesRef-backed type is dispatched to the same evaluator.
    String evaluatorName = "MvSliceBytesRefEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]";
    suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 10, () -> randomLiteral(DataTypes.KEYWORD).value());
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, DataTypes.KEYWORD, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            DataTypes.KEYWORD,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
    suppliers.add(new TestCaseSupplier(List.of(DataTypes.TEXT, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 10, () -> randomLiteral(DataTypes.TEXT).value());
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, DataTypes.TEXT, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            DataTypes.TEXT,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
    suppliers.add(new TestCaseSupplier(List.of(DataTypes.IP, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 10, () -> randomLiteral(DataTypes.IP).value());
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, DataTypes.IP, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            DataTypes.IP,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
    suppliers.add(new TestCaseSupplier(List.of(DataTypes.VERSION, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 10, () -> randomLiteral(DataTypes.VERSION).value());
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, DataTypes.VERSION, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            DataTypes.VERSION,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
    // Spatial types use smaller lists (up to 5) of randomly generated WKT geometries.
    suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 5, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomPoint())));
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, EsqlDataTypes.GEO_POINT, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            EsqlDataTypes.GEO_POINT,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
    suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_POINT, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 5, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomPoint())));
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, EsqlDataTypes.CARTESIAN_POINT, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            EsqlDataTypes.CARTESIAN_POINT,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
    suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.GEO_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 5, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean()))));
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, EsqlDataTypes.GEO_SHAPE, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            EsqlDataTypes.GEO_SHAPE,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
    suppliers.add(new TestCaseSupplier(List.of(EsqlDataTypes.CARTESIAN_SHAPE, DataTypes.INTEGER, DataTypes.INTEGER), () -> {
        List<Object> values = randomList(1, 5, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean()))));
        int size = values.size();
        int from = randomIntBetween(0, size - 1);
        int to = randomIntBetween(from, size - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(values, EsqlDataTypes.CARTESIAN_SHAPE, "field"),
                new TestCaseSupplier.TypedData(from, DataTypes.INTEGER, "start"),
                new TestCaseSupplier.TypedData(to, DataTypes.INTEGER, "end")),
            evaluatorName,
            EsqlDataTypes.CARTESIAN_SHAPE,
            equalTo(from == to ? values.get(from) : values.subList(from, to + 1)));
    }));
}
273015.561151elasticsearch
/**
 * Builds the parameterized test cases for the subtraction operator: numeric widening
 * combinations, date/period/duration arithmetic, a multi-value warning case, and
 * integer/long/unsigned-long overflow cases.
 */
public static Iterable<Object[]> parameters() {
    // Numeric lhs/rhs combinations with automatic type widening (int -> long -> double).
    List<TestCaseSupplier> suppliers = TestCaseSupplier.forBinaryWithWidening(
        new TestCaseSupplier.NumericTypeTestConfigs<Number>(
            new TestCaseSupplier.NumericTypeTestConfig<>((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1, (l, r) -> l.intValue() - r.intValue(), "SubIntsEvaluator"),
            new TestCaseSupplier.NumericTypeTestConfig<>((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1, (l, r) -> l.longValue() - r.longValue(), "SubLongsEvaluator"),
            new TestCaseSupplier.NumericTypeTestConfig<>(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, (l, r) -> l.doubleValue() - r.doubleValue(), "SubDoublesEvaluator")),
        "lhs",
        "rhs",
        (lhs, rhs) -> List.of(),
        true);
    // datetime minus period runs through SubDatetimesEvaluator
    suppliers.add(new TestCaseSupplier("Datetime - Period", () -> {
        long datetime = (Long) randomLiteral(DataTypes.DATETIME).value();
        Period period = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value();
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(datetime, DataTypes.DATETIME, "lhs"),
                new TestCaseSupplier.TypedData(period, EsqlDataTypes.DATE_PERIOD, "rhs")),
            "SubDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]",
            DataTypes.DATETIME,
            equalTo(asMillis(asDateTime(datetime).minus(period))));
    }));
    // period minus period is constant-folded; no evaluator involved
    suppliers.add(new TestCaseSupplier("Period - Period", () -> {
        Period left = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value();
        Period right = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value();
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(left, EsqlDataTypes.DATE_PERIOD, "lhs"),
                new TestCaseSupplier.TypedData(right, EsqlDataTypes.DATE_PERIOD, "rhs")),
            "Only folding possible, so there's no evaluator",
            EsqlDataTypes.DATE_PERIOD,
            equalTo(left.minus(right)));
    }));
    // datetime minus duration also runs through SubDatetimesEvaluator
    suppliers.add(new TestCaseSupplier("Datetime - Duration", () -> {
        long datetime = (Long) randomLiteral(DataTypes.DATETIME).value();
        Duration duration = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value();
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(datetime, DataTypes.DATETIME, "lhs"),
                new TestCaseSupplier.TypedData(duration, EsqlDataTypes.TIME_DURATION, "rhs")),
            "SubDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]",
            DataTypes.DATETIME,
            equalTo(asMillis(asDateTime(datetime).minus(duration))));
    }));
    // duration minus duration is constant-folded; no evaluator involved
    suppliers.add(new TestCaseSupplier("Duration - Duration", () -> {
        Duration left = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value();
        Duration right = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value();
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(left, EsqlDataTypes.TIME_DURATION, "lhs"),
                new TestCaseSupplier.TypedData(right, EsqlDataTypes.TIME_DURATION, "rhs")),
            "Only folding possible, so there's no evaluator",
            EsqlDataTypes.TIME_DURATION,
            equalTo(left.minus(right)));
    }));
    // a multi-valued lhs yields null and emits the standard multi-value warnings
    suppliers.add(new TestCaseSupplier("MV", () -> {
        int right = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1);
        int leftFirst = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1);
        int leftSecond = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(List.of(leftFirst, leftSecond), DataTypes.INTEGER, "lhs"),
                new TestCaseSupplier.TypedData(right, DataTypes.INTEGER, "rhs")),
            "SubIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]",
            DataTypes.INTEGER,
            is(nullValue()))
            .withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.")
            .withWarning("Line -1:-1: java.lang.IllegalArgumentException: single-value function encountered multi-value");
    }));
    // overflow cases for each fixed-width numeric type
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.INTEGER, () -> Integer.MIN_VALUE, () -> randomIntBetween(1, Integer.MAX_VALUE), "SubIntsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.INTEGER, () -> randomIntBetween(Integer.MIN_VALUE, -2), () -> Integer.MAX_VALUE, "SubIntsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.LONG, () -> Long.MIN_VALUE, () -> randomLongBetween(1L, Long.MAX_VALUE), "SubLongsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.LONG, () -> randomLongBetween(Long.MIN_VALUE, -2L), () -> Long.MAX_VALUE, "SubLongsEvaluator"));
    suppliers.add(arithmeticExceptionOverflowCase(DataTypes.UNSIGNED_LONG, () -> ZERO_AS_UNSIGNED_LONG, () -> randomLongBetween(-Long.MAX_VALUE, Long.MAX_VALUE), "SubUnsignedLongsEvaluator"));
    return parameterSuppliersFromTypedData(suppliers);
}
274876.432136elasticsearch
/**
 * Verifies that a data stream can be migrated from ILM to the data stream lifecycle:
 * generations created while ILM was configured stay ILM-managed, while indices created
 * after the template swaps to a data stream lifecycle are picked up by the new mechanism
 * once the data stream itself gets a lifecycle.
 */
public void testIndexTemplateSwapsILMForDataStreamLifecycle() throws Exception {
    // ILM policy with a hot phase that rolls over after 2 documents.
    RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
    Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
    LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
    PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
    assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
    // Template initially points the data stream at the ILM policy (no DSL configured).
    putComposableIndexTemplate(indexTemplateName, null, List.of(dataStreamName + "*"), Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, null);
    CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
    client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
    indexDocs(dataStreamName, 2);
    // ILM rollover should produce a second generation.
    assertBusy(() -> {
        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
        GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
        assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName));
        List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices();
        assertThat(backingIndices.size(), equalTo(2));
        String backingIndex = backingIndices.get(0).getName();
        assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1));
        String writeIndex = backingIndices.get(1).getName();
        assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2));
    });
    // Both generations are ILM-managed; the write index is waiting for the next rollover.
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String firstGenerationIndex = backingIndices.get(0);
        String secondGenerationIndex = backingIndices.get(1);
        ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest().indices(firstGenerationIndex, secondGenerationIndex);
        ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get();
        assertManagedByIlmInHotPhase(explainResponse.getIndexResponses().get(firstGenerationIndex), PhaseCompleteStep.NAME);
        assertManagedByIlmInHotPhase(explainResponse.getIndexResponses().get(secondGenerationIndex), WaitForRolloverReadyStep.NAME);
    });
    // Swap the template to a data stream lifecycle (ILM setting removed).
    DataStreamLifecycle customLifecycle = customEnabledLifecycle();
    putComposableIndexTemplate(indexTemplateName, null, List.of(dataStreamName + "*"), Settings.EMPTY, null, customLifecycle);
    indexDocs(dataStreamName, 2);
    assertBusy(() -> {
        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
        GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
        assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().size(), is(3));
    });
    // Old generations stay on ILM; the new write index is managed by neither ILM nor DSL
    // because the data stream itself has no lifecycle configured yet.
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String firstGenerationIndex = backingIndices.get(0);
        String secondGenerationIndex = backingIndices.get(1);
        String writeIndex = backingIndices.get(2);
        ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest().indices(firstGenerationIndex, secondGenerationIndex, writeIndex);
        ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get();
        assertManagedByIlmInHotPhase(explainResponse.getIndexResponses().get(firstGenerationIndex), PhaseCompleteStep.NAME);
        assertManagedByIlmInHotPhase(explainResponse.getIndexResponses().get(secondGenerationIndex), PhaseCompleteStep.NAME);
        IndexLifecycleExplainResponse thirdGenerationExplain = explainResponse.getIndexResponses().get(writeIndex);
        assertThat(thirdGenerationExplain.managedByILM(), is(false));
        ExplainDataStreamLifecycleAction.Response dataStreamLifecycleExplainResponse = client().execute(ExplainDataStreamLifecycleAction.INSTANCE, new ExplainDataStreamLifecycleAction.Request(new String[] { writeIndex })).actionGet();
        assertThat(dataStreamLifecycleExplainResponse.getIndices().size(), is(1));
        ExplainIndexDataStreamLifecycle writeIndexDataStreamLifecycleExplain = dataStreamLifecycleExplainResponse.getIndices().get(0);
        assertThat(writeIndexDataStreamLifecycleExplain.isManagedByLifecycle(), is(false));
    });
    // Configure a lifecycle directly on the data stream; DSL takes over the write index
    // and performs a rollover of its own (fourth generation).
    client().execute(PutDataStreamLifecycleAction.INSTANCE, new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, new DataStreamLifecycle())).actionGet();
    indexDocs(dataStreamName, 1);
    assertBusy(() -> {
        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
        GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
        assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().size(), is(4));
    });
    // Update the data stream lifecycle retention; DSL-managed indices must report the
    // custom lifecycle while the ILM-era generations remain ILM-managed.
    client().execute(PutDataStreamLifecycleAction.INSTANCE, new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getDataStreamRetention())).actionGet();
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String firstGenerationIndex = backingIndices.get(0);
        String secondGenerationIndex = backingIndices.get(1);
        String thirdGenerationIndex = backingIndices.get(2);
        String writeIndex = backingIndices.get(3);
        ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest().indices(firstGenerationIndex, secondGenerationIndex, thirdGenerationIndex, writeIndex);
        ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get();
        assertManagedByIlmInHotPhase(explainResponse.getIndexResponses().get(firstGenerationIndex), PhaseCompleteStep.NAME);
        assertManagedByIlmInHotPhase(explainResponse.getIndexResponses().get(secondGenerationIndex), PhaseCompleteStep.NAME);
        IndexLifecycleExplainResponse thirdGenerationExplain = explainResponse.getIndexResponses().get(thirdGenerationIndex);
        assertThat(thirdGenerationExplain.managedByILM(), is(false));
        IndexLifecycleExplainResponse writeIndexExplain = explainResponse.getIndexResponses().get(writeIndex);
        assertThat(writeIndexExplain.managedByILM(), is(false));
        ExplainDataStreamLifecycleAction.Response dataStreamLifecycleExplainResponse = client().execute(ExplainDataStreamLifecycleAction.INSTANCE, new ExplainDataStreamLifecycleAction.Request(new String[] { thirdGenerationIndex, writeIndex })).actionGet();
        assertThat(dataStreamLifecycleExplainResponse.getIndices().size(), is(2));
        for (ExplainIndexDataStreamLifecycle index : dataStreamLifecycleExplainResponse.getIndices()) {
            assertThat(index.isManagedByLifecycle(), is(true));
            assertThat(index.getLifecycle(), equalTo(customLifecycle));
        }
    });
}

/**
 * Asserts that {@code explain} describes an ILM-managed index that is in the "hot"
 * phase at the given {@code expectedStep}.
 */
private static void assertManagedByIlmInHotPhase(IndexLifecycleExplainResponse explain, String expectedStep) {
    assertThat(explain.managedByILM(), is(true));
    assertThat(explain.getPhase(), is("hot"));
    assertThat(explain.getStep(), is(expectedStep));
}
274548.221135elasticsearch
/**
 * Verifies the transition from an ILM-only index template to one configuring BOTH ILM
 * and a data stream lifecycle: while ILM is preferred (the default) new generations stay
 * ILM-managed; once {@code prefer_ilm} is set to {@code false} and the data stream has a
 * lifecycle, newly created write indices are managed by the data stream lifecycle.
 */
public void testUpdateIndexTemplateFromILMtoBothILMAndDataStreamLifecycle() throws Exception {
    // ILM policy with a hot phase that rolls over after 2 documents.
    RolloverAction rolloverIlmAction = new RolloverAction(RolloverConditions.newBuilder().addMaxIndexDocsCondition(2L).build());
    Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(rolloverIlmAction.getWriteableName(), rolloverIlmAction));
    LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, Map.of("hot", hotPhase));
    PutLifecycleRequest putLifecycleRequest = new PutLifecycleRequest(lifecyclePolicy);
    assertAcked(client().execute(ILMActions.PUT, putLifecycleRequest).get());
    // Template initially configures ILM only.
    putComposableIndexTemplate(indexTemplateName, null, List.of(dataStreamName + "*"), Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, null);
    CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
    client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get();
    indexDocs(dataStreamName, 2);
    // ILM rollover should produce a second generation.
    assertBusy(() -> {
        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
        GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
        assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName));
        List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices();
        assertThat(backingIndices.size(), equalTo(2));
    });
    // Both generations are ILM-managed; the write index is waiting for the next rollover.
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String firstGenerationIndex = backingIndices.get(0);
        String secondGenerationIndex = backingIndices.get(1);
        ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest().indices(firstGenerationIndex, secondGenerationIndex);
        ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get();
        IndexLifecycleExplainResponse firstGenerationExplain = explainResponse.getIndexResponses().get(firstGenerationIndex);
        assertThat(firstGenerationExplain.managedByILM(), is(true));
        assertThat(firstGenerationExplain.getPhase(), is("hot"));
        assertThat(firstGenerationExplain.getStep(), is(PhaseCompleteStep.NAME));
        IndexLifecycleExplainResponse secondGenerationExplain = explainResponse.getIndexResponses().get(secondGenerationIndex);
        assertThat(secondGenerationExplain.managedByILM(), is(true));
        assertThat(secondGenerationExplain.getPhase(), is("hot"));
        assertThat(secondGenerationExplain.getStep(), is(WaitForRolloverReadyStep.NAME));
    });
    // Add a data stream lifecycle to the template while keeping ILM configured.
    // ILM is preferred by default, so new generations are still ILM-managed.
    putComposableIndexTemplate(indexTemplateName, null, List.of(dataStreamName + "*"), Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, new DataStreamLifecycle());
    indexDocs(dataStreamName, 2);
    assertBusy(() -> {
        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
        GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
        assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().size(), is(3));
    });
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String firstGenerationIndex = backingIndices.get(0);
        String secondGenerationIndex = backingIndices.get(1);
        String thirdGenerationIndex = backingIndices.get(2);
        ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest().indices(firstGenerationIndex, secondGenerationIndex, thirdGenerationIndex);
        ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get();
        IndexLifecycleExplainResponse firstGenerationExplain = explainResponse.getIndexResponses().get(firstGenerationIndex);
        assertThat(firstGenerationExplain.managedByILM(), is(true));
        assertThat(firstGenerationExplain.getPhase(), is("hot"));
        assertThat(firstGenerationExplain.getStep(), is(PhaseCompleteStep.NAME));
        IndexLifecycleExplainResponse secondGenerationExplain = explainResponse.getIndexResponses().get(secondGenerationIndex);
        assertThat(secondGenerationExplain.managedByILM(), is(true));
        assertThat(secondGenerationExplain.getPhase(), is("hot"));
        assertThat(secondGenerationExplain.getStep(), is(PhaseCompleteStep.NAME));
        IndexLifecycleExplainResponse thirdGenerationExplain = explainResponse.getIndexResponses().get(thirdGenerationIndex);
        assertThat(thirdGenerationExplain.managedByILM(), is(true));
        assertThat(thirdGenerationExplain.getStep(), is(WaitForRolloverReadyStep.NAME));
    });
    // Configure a lifecycle on the data stream and flip prefer_ilm to false so the data
    // stream lifecycle takes precedence for indices created from now on.
    // FIX: the original dropped the returned future here (fire-and-forget), so the
    // lifecycle update could silently fail or race the template update; block on it like
    // every other execute() call in this test.
    client().execute(PutDataStreamLifecycleAction.INSTANCE, new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, TimeValue.timeValueDays(90))).actionGet();
    putComposableIndexTemplate(indexTemplateName, null, List.of(dataStreamName + "*"), Settings.builder().put(IndexSettings.PREFER_ILM, false).put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, new DataStreamLifecycle());
    indexDocs(dataStreamName, 2);
    assertBusy(() -> {
        GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
        GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest).actionGet();
        assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1));
        assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().size(), is(4));
    });
    // Pre-existing generations remain ILM-managed; only the new write index is managed
    // by the data stream lifecycle.
    assertBusy(() -> {
        List<String> backingIndices = getBackingIndices(dataStreamName);
        String firstGenerationIndex = backingIndices.get(0);
        String secondGenerationIndex = backingIndices.get(1);
        String thirdGenerationIndex = backingIndices.get(2);
        String writeIndex = backingIndices.get(3);
        ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest().indices(firstGenerationIndex, secondGenerationIndex, thirdGenerationIndex, writeIndex);
        ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get();
        IndexLifecycleExplainResponse firstGenerationExplain = explainResponse.getIndexResponses().get(firstGenerationIndex);
        assertThat(firstGenerationExplain.managedByILM(), is(true));
        assertThat(firstGenerationExplain.getPhase(), is("hot"));
        assertThat(firstGenerationExplain.getStep(), is(PhaseCompleteStep.NAME));
        IndexLifecycleExplainResponse secondGenerationExplain = explainResponse.getIndexResponses().get(secondGenerationIndex);
        assertThat(secondGenerationExplain.managedByILM(), is(true));
        assertThat(secondGenerationExplain.getPhase(), is("hot"));
        assertThat(secondGenerationExplain.getStep(), is(PhaseCompleteStep.NAME));
        IndexLifecycleExplainResponse thirdGenerationExplain = explainResponse.getIndexResponses().get(thirdGenerationIndex);
        assertThat(thirdGenerationExplain.managedByILM(), is(true));
        assertThat(thirdGenerationExplain.getPhase(), is("hot"));
        assertThat(thirdGenerationExplain.getStep(), is(PhaseCompleteStep.NAME));
        ExplainDataStreamLifecycleAction.Response dataStreamLifecycleExplainResponse = client().execute(ExplainDataStreamLifecycleAction.INSTANCE, new ExplainDataStreamLifecycleAction.Request(new String[] { writeIndex })).actionGet();
        assertThat(dataStreamLifecycleExplainResponse.getIndices().size(), is(1));
        ExplainIndexDataStreamLifecycle dataStreamLifecycleExplain = dataStreamLifecycleExplainResponse.getIndices().get(0);
        assertThat(dataStreamLifecycleExplain.isManagedByLifecycle(), is(true));
        assertThat(dataStreamLifecycleExplain.getIndex(), is(writeIndex));
    });
}
272862.984148elasticsearch
public void testIndicesOnShuttingDownNodesInDangerousStep() {
    // For each permanent shutdown type, an index pinned to the shutting-down node while in one
    // of the non-interruptible shrink steps must be reported; everything else must not be.
    for (SingleNodeShutdownMetadata.Type shutdownType : List.of(
        SingleNodeShutdownMetadata.Type.REMOVE,
        SingleNodeShutdownMetadata.Type.SIGTERM,
        SingleNodeShutdownMetadata.Type.REPLACE
    )) {
        // An empty cluster state can never report anything as dangerous.
        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).build();
        assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(clusterState, "regular_node"), equalTo(Collections.emptySet()));
        assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(clusterState, "shutdown_node"), equalTo(Collections.emptySet()));

        // "no_danger": ILM-managed, but sitting in a step that is safe to interrupt.
        IndexMetadata safeIndex = IndexMetadata.builder("no_danger")
            .settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy"))
            .putCustom(
                ILM_CUSTOM_METADATA_KEY,
                LifecycleExecutionState.builder()
                    .setPhase("warm")
                    .setAction("shrink")
                    .setStep(GenerateUniqueIndexNameStep.NAME)
                    .build()
                    .asMap()
            )
            .numberOfShards(randomIntBetween(1, 5))
            .numberOfReplicas(randomIntBetween(0, 5))
            .build();
        // "danger": routed to the shutting-down node while in one of the dangerous shrink steps.
        IndexMetadata riskyIndex = IndexMetadata.builder("danger")
            .settings(
                settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy")
                    .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", "shutdown_node")
            )
            .putCustom(
                ILM_CUSTOM_METADATA_KEY,
                LifecycleExecutionState.builder()
                    .setPhase("warm")
                    .setAction("shrink")
                    .setStep(randomFrom(SetSingleNodeAllocateStep.NAME, CheckShrinkReadyStep.NAME, ShrinkStep.NAME, ShrunkShardsAllocatedStep.NAME))
                    .build()
                    .asMap()
            )
            .numberOfShards(randomIntBetween(1, 5))
            .numberOfReplicas(randomIntBetween(0, 5))
            .build();
        Metadata initialMetadata = Metadata.builder()
            .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING))
            .indices(Map.of("no_danger", safeIndex, "danger", riskyIndex))
            .persistentSettings(settings(IndexVersion.current()).build())
            .build();
        DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
            .localNodeId(nodeId)
            .masterNodeId(nodeId)
            .add(masterNode)
            .add(
                DiscoveryNodeUtils.builder("regular_node")
                    .applySettings(NodeRoles.masterNode(settings(IndexVersion.current()).build()))
                    .address(new TransportAddress(TransportAddress.META_ADDRESS, 9301))
                    .build()
            )
            .add(
                DiscoveryNodeUtils.builder("shutdown_node")
                    .applySettings(NodeRoles.masterNode(settings(IndexVersion.current()).build()))
                    .address(new TransportAddress(TransportAddress.META_ADDRESS, 9302))
                    .build()
            )
            .build();
        clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(initialMetadata).nodes(discoveryNodes).build();

        // Indices exist but no node is shutting down yet, so nothing is dangerous.
        assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(clusterState, "regular_node"), equalTo(Collections.emptySet()));
        assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(clusterState, "shutdown_node"), equalTo(Collections.emptySet()));

        // A RESTART shutdown is transient and therefore never dangerous.
        SingleNodeShutdownMetadata restartShutdown = SingleNodeShutdownMetadata.builder()
            .setNodeId("shutdown_node")
            .setReason("shut down for test")
            .setStartedAtMillis(randomNonNegativeLong())
            .setType(SingleNodeShutdownMetadata.Type.RESTART)
            .build();
        clusterState = ClusterState.builder(clusterState)
            .metadata(
                Metadata.builder(clusterState.metadata())
                    .putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(Collections.singletonMap("shutdown_node", restartShutdown)))
                    .build()
            )
            .build();
        assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(clusterState, "regular_node"), equalTo(Collections.emptySet()));
        assertThat("restart type shutdowns are not considered dangerous", IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(clusterState, "shutdown_node"), equalTo(Collections.emptySet()));

        // A permanent shutdown of the pinned node flags the index stuck in a dangerous step.
        final String targetNodeName = shutdownType == SingleNodeShutdownMetadata.Type.REPLACE ? randomAlphaOfLengthBetween(10, 20) : null;
        final TimeValue gracePeriod = shutdownType == SingleNodeShutdownMetadata.Type.SIGTERM ? randomTimeValue() : null;
        SingleNodeShutdownMetadata permanentShutdown = SingleNodeShutdownMetadata.builder()
            .setNodeId("shutdown_node")
            .setReason("shut down for test")
            .setStartedAtMillis(randomNonNegativeLong())
            .setType(shutdownType)
            .setTargetNodeName(targetNodeName)
            .setGracePeriod(gracePeriod)
            .build();
        clusterState = ClusterState.builder(clusterState)
            .metadata(
                Metadata.builder(clusterState.metadata())
                    .putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(Collections.singletonMap("shutdown_node", permanentShutdown)))
                    .build()
            )
            .build();
        assertThat(IndexLifecycleService.indicesOnShuttingDownNodesInDangerousStep(clusterState, "shutdown_node"), equalTo(Collections.singleton("danger")));
    }
}
271494.7944104elasticsearch
 /**
  * Estimates the per-partition model memory requirement in bytes for a single detector.
  * The base figure depends on the detector function; it is then scaled by the estimated
  * cardinalities of the by/over/partition fields, capped at {@code Long.MAX_VALUE}.
  * The per-function byte figures appear to be empirically chosen estimates of the C++
  * model's footprint — confirm against the anomaly detection model memory docs if changing.
  *
  * @param detector           the detector to estimate for
  * @param bucketSpanSeconds  the job's bucket span, used to reduce effective cardinality
  * @param overallCardinality per-field cardinality estimates keyed by field name
  * @return estimated memory requirement in bytes
  */
 static long calculateDetectorRequirementBytes(Detector detector, long bucketSpanSeconds, Map<String, Long> overallCardinality) {
    long answer = 0;
    // When true, an extra workspace allowance is added at the end for functions that must
    // buffer raw field values (distinct counts and info content).
    boolean addFieldValueWorkspace = false;
    switch(detector.getFunction()) {
        case DISTINCT_COUNT:
        case LOW_DISTINCT_COUNT:
        case HIGH_DISTINCT_COUNT:
            addFieldValueWorkspace = true;
            // intentional fall-through: distinct counts share the count functions' base estimate
        case COUNT:
        case LOW_COUNT:
        case HIGH_COUNT:
        case NON_ZERO_COUNT:
        case LOW_NON_ZERO_COUNT:
        case HIGH_NON_ZERO_COUNT:
            answer = ByteSizeValue.ofKb(32).getBytes();
            break;
        case RARE:
        case FREQ_RARE:
            answer = ByteSizeValue.ofKb(2).getBytes();
            break;
        case INFO_CONTENT:
        case LOW_INFO_CONTENT:
        case HIGH_INFO_CONTENT:
            addFieldValueWorkspace = true;
            // intentional fall-through: info content shares the metric-style functions' base estimate
        case MEAN:
        case LOW_MEAN:
        case HIGH_MEAN:
        case AVG:
        case LOW_AVG:
        case HIGH_AVG:
        case MIN:
        case MAX:
        case SUM:
        case LOW_SUM:
        case HIGH_SUM:
        case NON_NULL_SUM:
        case LOW_NON_NULL_SUM:
        case HIGH_NON_NULL_SUM:
        case VARP:
        case LOW_VARP:
        case HIGH_VARP:
            answer = ByteSizeValue.ofKb(48).getBytes();
            break;
        case METRIC:
            // METRIC bundles several individual models, hence the largest base estimate.
            answer = ByteSizeValue.ofKb(120).getBytes();
            break;
        case MEDIAN:
        case LOW_MEDIAN:
        case HIGH_MEDIAN:
            answer = ByteSizeValue.ofKb(64).getBytes();
            break;
        case TIME_OF_DAY:
        case TIME_OF_WEEK:
            answer = ByteSizeValue.ofKb(10).getBytes();
            break;
        case LAT_LONG:
            answer = ByteSizeValue.ofKb(64).getBytes();
            break;
        default:
            // Unknown function: fail loudly in tests; in production the estimate stays 0.
            assert false : "unhandled detector function: " + detector.getFunction().getFullName();
    }
    // Partition field cardinality multiplies the whole estimate at the end (see below);
    // a floor of 1 keeps the multiplication a no-op when there is no partition field.
    long partitionFieldCardinalityEstimate = 1;
    String partitionFieldName = detector.getPartitionFieldName();
    if (partitionFieldName != null) {
        partitionFieldCardinalityEstimate = Math.max(1, cardinalityEstimate(Detector.PARTITION_FIELD_NAME_FIELD.getPreferredName(), partitionFieldName, overallCardinality, true));
    }
    String byFieldName = detector.getByFieldName();
    if (byFieldName != null) {
        // Scale by roughly two thirds of the by-field cardinality, reduced for the bucket span.
        long byFieldCardinalityEstimate = cardinalityEstimate(Detector.BY_FIELD_NAME_FIELD.getPreferredName(), byFieldName, overallCardinality, true);
        double multiplier = Math.ceil(reducedCardinality(byFieldCardinalityEstimate, partitionFieldCardinalityEstimate, bucketSpanSeconds) * 2.0 / 3.0);
        answer = multiplyNonNegativeLongsWithMaxValueCap(answer, (long) multiplier);
    }
    String overFieldName = detector.getOverFieldName();
    if (overFieldName != null) {
        // Over fields add a flat 768 bytes per (reduced) distinct value rather than multiplying.
        long overFieldCardinalityEstimate = cardinalityEstimate(Detector.OVER_FIELD_NAME_FIELD.getPreferredName(), overFieldName, overallCardinality, true);
        double multiplier = Math.ceil(reducedCardinality(overFieldCardinalityEstimate, partitionFieldCardinalityEstimate, bucketSpanSeconds));
        answer = addNonNegativeLongsWithMaxValueCap(answer, multiplyNonNegativeLongsWithMaxValueCap(768, (long) multiplier));
    }
    if (partitionFieldName != null) {
        // One model per partition value, so the partition cardinality multiplies everything.
        answer = multiplyNonNegativeLongsWithMaxValueCap(answer, partitionFieldCardinalityEstimate);
    }
    if (addFieldValueWorkspace) {
        // Flat 5MB workspace for functions that buffer raw field values.
        answer = addNonNegativeLongsWithMaxValueCap(answer, ByteSizeValue.ofMb(5).getBytes());
    }
    return answer;
}
272822.2717131elasticsearch
/**
 * Removes every document and index associated with the job identified by the enclosing
 * {@code jobId}. The work is a chain of listeners that execute in the REVERSE order of
 * their declaration below:
 * <ol>
 *   <li>delete model state</li>
 *   <li>delete quantiles</li>
 *   <li>delete categorizer state</li>
 *   <li>delete annotations</li>
 *   <li>fetch the job config and resolve its results alias to concrete index names</li>
 *   <li>multi-search those indices to see which still hold other jobs' documents</li>
 *   <li>delete indices used exclusively by this job outright; run a delete-by-query on
 *       shared indices (the default shared results index is never deleted)</li>
 *   <li>delete the job's aliases and report completion</li>
 * </ol>
 *
 * @param jobConfigProvider           used to look up the job configuration
 * @param indexNameExpressionResolver resolves the job's results alias to concrete indices
 * @param clusterState                cluster state used for index name resolution
 * @param finishedHandler             receives {@code true} when the final alias removal was acknowledged
 * @param failureHandler              invoked if any step fails
 */
public void deleteJobDocuments(JobConfigProvider jobConfigProvider, IndexNameExpressionResolver indexNameExpressionResolver, ClusterState clusterState, CheckedConsumer<Boolean, Exception> finishedHandler, Consumer<Exception> failureHandler) {
    // Populated once the job config has been fetched; read by several listeners below.
    AtomicReference<String[]> indexNames = new AtomicReference<>();
    final ActionListener<IndicesAliasesResponse> completionHandler = ActionListener.wrap(response -> finishedHandler.accept(response.isAcknowledged()), failureHandler);
    // Handles the delete-by-query result; a null response means the DBQ step was skipped.
    ActionListener<BulkByScrollResponse> dbqHandler = ActionListener.wrap(bulkByScrollResponse -> {
        if (bulkByScrollResponse == null) {
            completionHandler.onResponse(IndicesAliasesResponse.ACKNOWLEDGED_NO_ERRORS);
        } else {
            if (bulkByScrollResponse.isTimedOut()) {
                logger.warn("[{}] DeleteByQuery for indices [{}] timed out.", jobId, String.join(", ", indexNames.get()));
            }
            if (bulkByScrollResponse.getBulkFailures().isEmpty() == false) {
                logger.warn("[{}] {} failures and {} conflicts encountered while running DeleteByQuery on indices [{}].", jobId, bulkByScrollResponse.getBulkFailures().size(), bulkByScrollResponse.getVersionConflicts(), String.join(", ", indexNames.get()));
                for (BulkItemResponse.Failure failure : bulkByScrollResponse.getBulkFailures()) {
                    // Parameterized logging for consistency with the warnings above (was string concatenation).
                    logger.warn("DBQ failure: {}", failure);
                }
            }
            deleteAliases(jobId, completionHandler);
        }
    }, failureHandler);
    // Runs the DBQ only when the previous step decided it is needed and indices were resolved.
    ActionListener<Boolean> deleteByQueryExecutor = ActionListener.wrap(response -> {
        if (response && indexNames.get().length > 0) {
            deleteResultsByQuery(jobId, indexNames.get(), dbqHandler);
        } else {
            dbqHandler.onResponse(null);
        }
    }, failureHandler);
    // Decides, per resolved index, whether it can be deleted outright (no other jobs'
    // documents in it) or must instead be cleaned via delete-by-query.
    ActionListener<MultiSearchResponse> customIndexSearchHandler = ActionListener.wrap(multiSearchResponse -> {
        if (multiSearchResponse == null) {
            // No indices were resolved; still run the DBQ path so aliases get cleaned up.
            deleteByQueryExecutor.onResponse(true);
            return;
        }
        String defaultSharedIndex = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT;
        List<String> indicesToDelete = new ArrayList<>();
        boolean needToRunDBQTemp = false;
        assert multiSearchResponse.getResponses().length == indexNames.get().length;
        // i tracks the position in indexNames matching each multi-search item.
        int i = 0;
        for (MultiSearchResponse.Item item : multiSearchResponse.getResponses()) {
            if (item.isFailure()) {
                ++i;
                if (ExceptionsHelper.unwrapCause(item.getFailure()) instanceof IndexNotFoundException) {
                    // Index already gone: nothing to delete for it.
                    continue;
                } else {
                    failureHandler.accept(item.getFailure());
                    return;
                }
            }
            SearchResponse searchResponse = item.getResponse();
            // Other jobs' documents present, or it's the default shared index: never delete it,
            // clean our documents out with a delete-by-query instead.
            if (searchResponse.getHits().getTotalHits().value > 0 || indexNames.get()[i].equals(defaultSharedIndex)) {
                needToRunDBQTemp = true;
            } else {
                indicesToDelete.add(indexNames.get()[i]);
            }
            ++i;
        }
        final boolean needToRunDBQ = needToRunDBQTemp;
        if (indicesToDelete.isEmpty()) {
            deleteByQueryExecutor.onResponse(needToRunDBQ);
            return;
        }
        logger.info("[{}] deleting the following indices directly {}", jobId, indicesToDelete);
        DeleteIndexRequest request = new DeleteIndexRequest(indicesToDelete.toArray(String[]::new));
        request.indicesOptions(IndicesOptions.lenientExpandOpenHidden());
        executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, request, ActionListener.<AcknowledgedResponse>wrap(response -> deleteByQueryExecutor.onResponse(needToRunDBQ), failureHandler), client.admin().indices()::delete);
    }, failure -> {
        if (ExceptionsHelper.unwrapCause(failure) instanceof IndexNotFoundException) {
            // The searched indices don't exist, so there is nothing left to clean up.
            deleteByQueryExecutor.onResponse(false);
        } else {
            failureHandler.accept(failure);
        }
    });
    // Resolves the job's concrete result indices, then multi-searches each for documents
    // belonging to OTHER jobs (mustNot match this jobId, up to 1 hit tracked).
    ActionListener<Job.Builder> getJobHandler = ActionListener.wrap(builder -> {
        indexNames.set(indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), AnomalyDetectorsIndex.jobResultsAliasedName(jobId)));
        if (indexNames.get().length == 0) {
            customIndexSearchHandler.onResponse(null);
            return;
        }
        MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
        for (String indexName : indexNames.get()) {
            SearchSourceBuilder source = new SearchSourceBuilder().size(0).trackTotalHitsUpTo(1).query(QueryBuilders.boolQuery().filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobId))));
            multiSearchRequest.add(new SearchRequest(indexName).source(source));
        }
        executeAsyncWithOrigin(client, ML_ORIGIN, TransportMultiSearchAction.TYPE, multiSearchRequest, customIndexSearchHandler);
    }, failureHandler);
    ActionListener<Boolean> deleteAnnotationsHandler = ActionListener.wrap(response -> jobConfigProvider.getJob(jobId, null, getJobHandler), failureHandler);
    ActionListener<Boolean> deleteCategorizerStateHandler = ActionListener.wrap(response -> deleteAllAnnotations(deleteAnnotationsHandler), failureHandler);
    // NOTE(review): the 1 is presumably the first categorizer-state document number — confirm.
    ActionListener<Boolean> deleteQuantilesHandler = ActionListener.wrap(response -> deleteCategorizerState(jobId, 1, deleteCategorizerStateHandler), failureHandler);
    ActionListener<BulkByScrollResponse> deleteStateHandler = ActionListener.wrap(bulkResponse -> deleteQuantiles(jobId, deleteQuantilesHandler), failureHandler);
    // Kick off the chain.
    deleteModelState(jobId, deleteStateHandler);
}
274414.843135elasticsearch
public void testKeywordsArray() throws IOException {
    // Frequent item set mining over a single multi-valued keyword field, optionally
    // excluding one item, asserting the exact sets, document counts and support values.
    List<MultiValuesSourceFieldConfig> fieldConfigs = new ArrayList<>();
    String exclude = randomBoolean() ? randomFrom("item-3", "item-4", "item-5", "item-99") : null;
    IncludeExclude includeExclude = exclude == null ? null : new IncludeExclude(null, null, null, new TreeSet<>(Set.of(new BytesRef(exclude))));
    fieldConfigs.add(new MultiValuesSourceFieldConfig.Builder().setFieldName(KEYWORD_FIELD1).setIncludeExclude(includeExclude).build());
    double minimumSupport = randomDoubleBetween(0.13, 0.41, true);
    int minimumSetSize = randomIntBetween(2, 5);
    int size = randomIntBetween(1, 100);
    Query query = new MatchAllDocsQuery();
    MappedFieldType keywordType = new KeywordFieldMapper.KeywordFieldType(KEYWORD_FIELD1);
    // Expected sets for the ten documents below, ordered by (doc count, support).
    List<FrequentItemSet> expectedResults = List.of(
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-1", "item-3")), 7, 0.7),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-1", "item-7")), 6, 0.6),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-7")), 5, 0.5),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-1", "item-3", "item-7")), 4, 0.4),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-7", "item-8")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-4")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-6")), 3, 0.3),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-1", "item-3", "item-7", "item-8")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-1", "item-3", "item-7", "item-9")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-4", "item-7", "item-8")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-1", "item-3", "item-4")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-1", "item-3", "item-6")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-4", "item-6")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-6", "item-7")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-2", "item-3")), 2, 0.2),
        new FrequentItemSet(Map.of(KEYWORD_FIELD1, List.of("item-3", "item-5")), 2, 0.2)
    );
    FrequentItemSetsAggregationBuilder builder = new FrequentItemSetsAggregationBuilder("fi", fieldConfigs, minimumSupport, minimumSetSize, size, null, randomFrom(EXECUTION_HINT_ALLOWED_MODES));
    // Each inner array is one document ("transaction"); every value is indexed into KEYWORD_FIELD1.
    String[][] transactions = {
        { "item-1", "item-2", "item-3" },
        { "item-1", "item-3", "item-5" },
        { "item-1", "item-8", "item-3", "item-9", "item-7", "item-4" },
        { "item-1", "item-7", "item-8", "item-3" },
        { "item-1", "item-7" },
        { "item-1", "item-7", "item-6", "item-3" },
        { "item-1", "item-3", "item-6", "item-4" },
        { "item-1", "item-7", "item-9", "item-3" },
        { "item-2", "item-3", "item-4", "item-5", "item-6", "item-7", "item-8" },
        { "item-1", "item-7" } };
    testCase(iw -> {
        for (String[] transaction : transactions) {
            List<SortedSetDocValuesField> doc = new ArrayList<>();
            for (String item : transaction) {
                doc.add(new SortedSetDocValuesField(KEYWORD_FIELD1, new BytesRef(item)));
            }
            iw.addDocument(doc);
        }
    }, (InternalItemSetMapReduceAggregation<?, ?, ?, EclatResult> results) -> {
        assertNotNull(results);
        assertResults(expectedResults, results.getMapReduceResult().getFrequentItemSets(), minimumSupport, minimumSetSize, size, exclude, null);
    }, new AggTestConfig(builder, keywordType).withQuery(query));
}
273389.591146elasticsearch
public void testClusterChanged() throws Exception {
    // Verifies that the node service starts deployments routed to this node, stops local
    // tasks whose routing entry moves away, and ignores assignments routed to other nodes
    // or already in a terminal routing state.
    final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService();
    final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build();
    String modelOne = "model-1";
    String modelTwo = "model-2";
    String notUsedModel = "model-3";
    String previouslyUsedModel = "model-4";
    String deploymentOne = "deployment-1";
    String deploymentTwo = "deployment-2";
    String notUsedDeployment = "deployment-3";
    String previouslyUsedDeployment = "deployment-4";
    givenAssignmentsInClusterStateForModels(List.of(deploymentOne, deploymentTwo, previouslyUsedDeployment), List.of(modelOne, modelTwo, previouslyUsedModel));
    // First event: deployments 1 and 2 are routed to this node; deployment 4 is also routed
    // here but its routing was updated to a terminal state (STOPPED/FAILED/STOPPING);
    // deployment 3 is routed to some other node.
    ClusterChangedEvent event = new ClusterChangedEvent("testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")).nodes(nodes).putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(deploymentOne, TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)).addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).addNewAssignment(deploymentTwo, TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo)).addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")).updateExistingRoutingEntry(NODE_ID, new RoutingInfo(1, 1, randomFrom(RoutingState.STARTED, RoutingState.STARTING), randomAlphaOfLength(10)))).addNewAssignment(previouslyUsedDeployment, TrainedModelAssignment.Builder.empty(newParams(previouslyUsedDeployment, previouslyUsedModel)).addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")).updateExistingRoutingEntry(NODE_ID, new RoutingInfo(1, 1, randomFrom(RoutingState.STOPPED, RoutingState.FAILED, RoutingState.STOPPING), randomAlphaOfLength(10)))).addNewAssignment(notUsedDeployment, TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel)).addRoutingEntry("some-other-node", new RoutingInfo(1, 1, RoutingState.STARTING, ""))).build()).build()).build(), ClusterState.EMPTY_STATE);
    trainedModelAssignmentNodeService.clusterChanged(event);
    // Second event: deployment 2's routing moves to another node (so the local task must be
    // stopped) and deployment 4 disappears entirely; deployment 1 stays routed here.
    event = new ClusterChangedEvent("testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")).nodes(nodes).putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(deploymentOne, TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)).addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).addNewAssignment(deploymentTwo, TrainedModelAssignment.Builder.empty(newParams(deploymentTwo, modelTwo)).addRoutingEntry("some-other-node", new RoutingInfo(1, 1, RoutingState.STARTING, ""))).addNewAssignment(notUsedDeployment, TrainedModelAssignment.Builder.empty(newParams(notUsedDeployment, notUsedModel)).addRoutingEntry("some-other-node", new RoutingInfo(1, 1, RoutingState.STARTING, ""))).build()).build()).build(), ClusterState.EMPTY_STATE);
    trainedModelAssignmentNodeService.clusterChanged(event);
    trainedModelAssignmentNodeService.loadQueuedModels();
    // Exactly one stop must be issued, and it must be for deployment 2.
    assertBusy(() -> {
        ArgumentCaptor<TrainedModelDeploymentTask> stoppedTaskCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class);
        verify(deploymentManager, times(1)).stopDeployment(stoppedTaskCapture.capture());
        assertThat(stoppedTaskCapture.getAllValues().get(0).getDeploymentId(), equalTo(deploymentTwo));
    });
    // Only deployment 1 (model 1) is started locally, and its routing state is reported
    // back to the assignment service as STARTED for this node.
    ArgumentCaptor<TrainedModelDeploymentTask> startTaskCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class);
    ArgumentCaptor<UpdateTrainedModelAssignmentRoutingInfoAction.Request> requestCapture = ArgumentCaptor.forClass(UpdateTrainedModelAssignmentRoutingInfoAction.Request.class);
    verify(deploymentManager, times(1)).startDeployment(startTaskCapture.capture(), any());
    verify(trainedModelAssignmentService, times(1)).updateModelAssignmentState(requestCapture.capture(), any());
    assertThat(startTaskCapture.getAllValues().get(0).getModelId(), equalTo(modelOne));
    assertThat(requestCapture.getAllValues().get(0).getDeploymentId(), equalTo(deploymentOne));
    assertThat(requestCapture.getAllValues().get(0).getNodeId(), equalTo(NODE_ID));
    assertThat(requestCapture.getAllValues().get(0).getUpdate().getStateAndReason().get().getState(), equalTo(RoutingState.STARTED));
    // Third event: deployment 1 is still routed here and nothing else changed, so no
    // further interactions with the deployment manager or assignment service occur.
    event = new ClusterChangedEvent("testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")).nodes(nodes).putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(deploymentOne, TrainedModelAssignment.Builder.empty(newParams(deploymentOne, modelOne)).addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, ""))).build()).build()).build(), ClusterState.EMPTY_STATE);
    trainedModelAssignmentNodeService.clusterChanged(event);
    trainedModelAssignmentNodeService.loadQueuedModels();
    verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService);
}
275838.872127elasticsearch
public void testUsage() throws Exception {
    when(licenseState.isAllowed(MachineLearningField.ML_API_FEATURE)).thenReturn(true);
    Settings.Builder settings = Settings.builder().put(commonSettings);
    settings.put("xpack.ml.enabled", true);
    Map<String, Integer> trainedModelsCountByAnalysis = Map.of("classification", 1, "regression", 1, "ner", 1);
    Map<String, Integer> expectedDfaCountByAnalysis = setupComplexMocks();
    var usageAction = newUsageAction(settings.build(), true, true, true);
    PlainActionFuture<XPackUsageFeatureResponse> future = new PlainActionFuture<>();
    usageAction.masterOperation(null, null, ClusterState.EMPTY_STATE, future);
    XPackFeatureSet.Usage mlUsage = future.get().getUsage();
    BytesStreamOutput out = new BytesStreamOutput();
    mlUsage.writeTo(out);
    XPackFeatureSet.Usage serializedUsage = new MachineLearningFeatureSetUsage(out.bytes().streamInput());
    for (XPackFeatureSet.Usage usage : Arrays.asList(mlUsage, serializedUsage)) {
        assertThat(usage, is(notNullValue()));
        assertThat(usage.name(), is(XPackField.MACHINE_LEARNING));
        assertThat(usage.enabled(), is(true));
        assertThat(usage.available(), is(true));
        XContentSource source;
        try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
            usage.toXContent(builder, ToXContent.EMPTY_PARAMS);
            source = new XContentSource(builder);
        }
        assertThat(source.getValue("jobs._all.count"), equalTo(3));
        assertThat(source.getValue("jobs._all.detectors.min"), equalTo(1.0));
        assertThat(source.getValue("jobs._all.detectors.max"), equalTo(3.0));
        assertThat(source.getValue("jobs._all.detectors.total"), equalTo(6.0));
        assertThat(source.getValue("jobs._all.detectors.avg"), equalTo(2.0));
        assertThat(source.getValue("jobs._all.model_size.min"), equalTo(100.0));
        assertThat(source.getValue("jobs._all.model_size.max"), equalTo(300.0));
        assertThat(source.getValue("jobs._all.model_size.total"), equalTo(600.0));
        assertThat(source.getValue("jobs._all.model_size.avg"), equalTo(200.0));
        assertThat(source.getValue("jobs._all.created_by.a_cool_module"), equalTo(1));
        assertThat(source.getValue("jobs._all.created_by.unknown"), equalTo(2));
        assertThat(source.getValue("jobs.opened.count"), equalTo(2));
        assertThat(source.getValue("jobs.opened.detectors.min"), equalTo(1.0));
        assertThat(source.getValue("jobs.opened.detectors.max"), equalTo(2.0));
        assertThat(source.getValue("jobs.opened.detectors.total"), equalTo(3.0));
        assertThat(source.getValue("jobs.opened.detectors.avg"), equalTo(1.5));
        assertThat(source.getValue("jobs.opened.model_size.min"), equalTo(100.0));
        assertThat(source.getValue("jobs.opened.model_size.max"), equalTo(200.0));
        assertThat(source.getValue("jobs.opened.model_size.total"), equalTo(300.0));
        assertThat(source.getValue("jobs.opened.model_size.avg"), equalTo(150.0));
        assertThat(source.getValue("jobs.opened.created_by.a_cool_module"), equalTo(1));
        assertThat(source.getValue("jobs.opened.created_by.unknown"), equalTo(1));
        assertThat(source.getValue("jobs.closed.count"), equalTo(1));
        assertThat(source.getValue("jobs.closed.detectors.min"), equalTo(3.0));
        assertThat(source.getValue("jobs.closed.detectors.max"), equalTo(3.0));
        assertThat(source.getValue("jobs.closed.detectors.total"), equalTo(3.0));
        assertThat(source.getValue("jobs.closed.detectors.avg"), equalTo(3.0));
        assertThat(source.getValue("jobs.closed.model_size.min"), equalTo(300.0));
        assertThat(source.getValue("jobs.closed.model_size.max"), equalTo(300.0));
        assertThat(source.getValue("jobs.closed.model_size.total"), equalTo(300.0));
        assertThat(source.getValue("jobs.closed.model_size.avg"), equalTo(300.0));
        assertThat(source.getValue("jobs.closed.created_by.a_cool_module"), is(nullValue()));
        assertThat(source.getValue("jobs.closed.created_by.unknown"), equalTo(1));
        assertThat(source.getValue("jobs.opening"), is(nullValue()));
        assertThat(source.getValue("jobs.closing"), is(nullValue()));
        assertThat(source.getValue("jobs.failed"), is(nullValue()));
        assertThat(source.getValue("datafeeds._all.count"), equalTo(3));
        assertThat(source.getValue("datafeeds.started.count"), equalTo(2));
        assertThat(source.getValue("datafeeds.stopped.count"), equalTo(1));
        assertThat(source.getValue("data_frame_analytics_jobs._all.count"), equalTo(3));
        assertThat(source.getValue("data_frame_analytics_jobs.started.count"), equalTo(1));
        assertThat(source.getValue("data_frame_analytics_jobs.stopped.count"), equalTo(2));
        assertThat(source.getValue("data_frame_analytics_jobs.analysis_counts"), equalTo(expectedDfaCountByAnalysis));
        assertThat(source.getValue("data_frame_analytics_jobs.memory_usage.peak_usage_bytes.min"), equalTo(100.0));
        assertThat(source.getValue("data_frame_analytics_jobs.memory_usage.peak_usage_bytes.max"), equalTo(200.0));
        assertThat(source.getValue("data_frame_analytics_jobs.memory_usage.peak_usage_bytes.total"), equalTo(300.0));
        assertThat(source.getValue("data_frame_analytics_jobs.memory_usage.peak_usage_bytes.avg"), equalTo(150.0));
        assertThat(source.getValue("jobs._all.forecasts.total"), equalTo(11));
        assertThat(source.getValue("jobs._all.forecasts.forecasted_jobs"), equalTo(2));
        assertThat(source.getValue("jobs.closed.forecasts.total"), equalTo(0));
        assertThat(source.getValue("jobs.closed.forecasts.forecasted_jobs"), equalTo(0));
        assertThat(source.getValue("jobs.opened.forecasts.total"), equalTo(11));
        assertThat(source.getValue("jobs.opened.forecasts.forecasted_jobs"), equalTo(2));
        assertThat(source.getValue("inference.trained_models._all.count"), equalTo(4));
        assertThat(source.getValue("inference.trained_models.model_size_bytes.min"), equalTo(100.0));
        assertThat(source.getValue("inference.trained_models.model_size_bytes.max"), equalTo(300.0));
        assertThat(source.getValue("inference.trained_models.model_size_bytes.total"), equalTo(600.0));
        assertThat(source.getValue("inference.trained_models.model_size_bytes.avg"), equalTo(200.0));
        assertThat(source.getValue("inference.trained_models.estimated_operations.min"), equalTo(200.0));
        assertThat(source.getValue("inference.trained_models.estimated_operations.max"), equalTo(600.0));
        assertThat(source.getValue("inference.trained_models.estimated_operations.total"), equalTo(1200.0));
        assertThat(source.getValue("inference.trained_models.estimated_operations.avg"), equalTo(400.0));
        assertThat(source.getValue("inference.trained_models.count.total"), equalTo(4));
        trainedModelsCountByAnalysis.forEach((name, count) -> assertThat(source.getValue("inference.trained_models.count." + name), equalTo(count)));
        assertThat(source.getValue("inference.trained_models.count.prepackaged"), equalTo(1));
        assertThat(source.getValue("inference.trained_models.count.other"), equalTo(1));
        assertThat(source.getValue("inference.ingest_processors._all.pipelines.count"), equalTo(10));
        assertThat(source.getValue("inference.ingest_processors._all.num_docs_processed.sum"), equalTo(150));
        assertThat(source.getValue("inference.ingest_processors._all.num_docs_processed.min"), equalTo(10));
        assertThat(source.getValue("inference.ingest_processors._all.num_docs_processed.max"), equalTo(50));
        assertThat(source.getValue("inference.ingest_processors._all.time_ms.sum"), equalTo(15));
        assertThat(source.getValue("inference.ingest_processors._all.time_ms.min"), equalTo(1));
        assertThat(source.getValue("inference.ingest_processors._all.time_ms.max"), equalTo(5));
        assertThat(source.getValue("inference.ingest_processors._all.num_failures.sum"), equalTo(1500));
        assertThat(source.getValue("inference.ingest_processors._all.num_failures.min"), equalTo(100));
        assertThat(source.getValue("inference.ingest_processors._all.num_failures.max"), equalTo(500));
        assertThat(source.getValue("inference.deployments.count"), equalTo(2));
        assertThat(source.getValue("inference.deployments.inference_counts.total"), equalTo(12.0));
        assertThat(source.getValue("inference.deployments.inference_counts.min"), equalTo(3.0));
        assertThat(source.getValue("inference.deployments.inference_counts.max"), equalTo(5.0));
        assertThat(source.getValue("inference.deployments.inference_counts.avg"), equalTo(4.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.0.model_id"), equalTo("model_3"));
        assertThat(source.getValue("inference.deployments.stats_by_model.0.task_type"), equalTo("ner"));
        assertThat(source.getValue("inference.deployments.stats_by_model.0.last_access"), equalTo(lastAccess(3).toString()));
        assertThat(source.getValue("inference.deployments.stats_by_model.0.inference_counts.total"), equalTo(3.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.0.inference_counts.min"), equalTo(3.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.0.inference_counts.max"), equalTo(3.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.0.inference_counts.avg"), equalTo(3.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.1.model_id"), equalTo("model_4"));
        assertThat(source.getValue("inference.deployments.stats_by_model.1.task_type"), equalTo("text_expansion"));
        assertThat(source.getValue("inference.deployments.stats_by_model.1.last_access"), equalTo(lastAccess(44).toString()));
        assertThat(source.getValue("inference.deployments.stats_by_model.1.inference_counts.total"), equalTo(9.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.1.inference_counts.min"), equalTo(4.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.1.inference_counts.max"), equalTo(5.0));
        assertThat(source.getValue("inference.deployments.stats_by_model.1.inference_counts.avg"), equalTo(4.5));
        assertThat(source.getValue("inference.deployments.model_sizes_bytes.total"), equalTo(1300.0));
        assertThat(source.getValue("inference.deployments.model_sizes_bytes.min"), equalTo(300.0));
        assertThat(source.getValue("inference.deployments.model_sizes_bytes.max"), equalTo(1000.0));
        assertThat(source.getValue("inference.deployments.model_sizes_bytes.avg"), equalTo(650.0));
        assertThat(source.getValue("inference.deployments.time_ms.avg"), closeTo(44.0, 1e-10));
    }
}
272566.722122elasticsearch
/**
 * Positions this enum at the smallest term that is greater than or equal to
 * {@code target}, reusing the FST arcs and frame stack left over from the
 * previous seek wherever the target shares a prefix with the current term.
 *
 * @return {@link SeekStatus#FOUND} when the exact term exists,
 *         {@link SeekStatus#NOT_FOUND} when positioned on the next greater term,
 *         or {@link SeekStatus#END} when no term is &gt;= the target
 * @throws IllegalStateException if the terms index was not loaded
 */
public SeekStatus seekCeil(BytesRef target) throws IOException {
    if (fr.index == null) {
        throw new IllegalStateException("terms index was not loaded");
    }
    // Ensure the term buffer can hold the target plus one extra byte.
    term.grow(1 + target.length);
    assert clearEOF();
    FST.Arc<BytesRef> arc;
    int targetUpto;  // how many leading target bytes have been matched so far
    BytesRef output; // FST output accumulated along the matched prefix
    targetBeforeCurrentLength = currentFrame.ord;
    if (currentFrame != staticFrame) {
        // There is prior seek state: find the longest prefix the target shares
        // with the current term so the cached arcs/frames can be reused.
        arc = arcs[0];
        assert arc.isFinal();
        output = arc.output();
        targetUpto = 0;
        SegmentTermsEnumFrame lastFrame = stack[0];
        assert validIndexPrefix <= term.length();
        // First compare only within the prefix already validated against the index.
        final int targetLimit = Math.min(target.length, validIndexPrefix);
        int cmp = 0;
        while (targetUpto < targetLimit) {
            cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF);
            if (cmp != 0) {
                break;
            }
            arc = arcs[1 + targetUpto];
            assert arc.label() == (target.bytes[target.offset + targetUpto] & 0xFF) : "arc.label=" + (char) arc.label() + " targetLabel=" + (char) (target.bytes[target.offset + targetUpto] & 0xFF);
            if (arc.output() != Lucene40BlockTreeTermsReader.NO_OUTPUT) {
                output = Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output());
            }
            // Remember the deepest frame that ends on the shared prefix.
            if (arc.isFinal()) {
                lastFrame = stack[1 + lastFrame.ord];
            }
            targetUpto++;
        }
        if (cmp == 0) {
            // The validated prefix matched; keep comparing against the rest of the
            // current term without consulting the cached arcs.
            final int targetUptoMid = targetUpto;
            final int targetLimit2 = Math.min(target.length, term.length());
            while (targetUpto < targetLimit2) {
                cmp = (term.byteAt(targetUpto) & 0xFF) - (target.bytes[target.offset + targetUpto] & 0xFF);
                if (cmp != 0) {
                    break;
                }
                targetUpto++;
            }
            if (cmp == 0) {
                // One is a prefix of the other; the shorter sorts first.
                cmp = term.length() - target.length;
            }
            // Rewind to the end of the index-validated prefix for the FST walk below.
            targetUpto = targetUptoMid;
        }
        if (cmp < 0) {
            // Current term precedes the target: continue scanning forward from
            // the deepest shared frame.
            currentFrame = lastFrame;
        } else if (cmp > 0) {
            // Current term is past the target: rewind the shared frame so it can
            // be re-scanned from its start.
            targetBeforeCurrentLength = 0;
            currentFrame = lastFrame;
            currentFrame.rewind();
        } else {
            // Byte-for-byte equal to the current term.
            assert term.length() == target.length;
            if (termExists) {
                return SeekStatus.FOUND;
            } else {
                // Bytes match but no real term ends here (termExists is false);
                // fall through and scan the current frame for the ceiling term.
            }
        }
    } else {
        // No usable prior state: start the walk from the FST root arc.
        targetBeforeCurrentLength = -1;
        arc = fr.index.getFirstArc(arcs[0]);
        assert arc.isFinal();
        assert arc.output() != null;
        output = arc.output();
        currentFrame = staticFrame;
        targetUpto = 0;
        currentFrame = pushFrame(arc, Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput()), 0);
    }
    // Walk the index one target byte at a time, pushing a new frame whenever a
    // final arc is crossed.
    while (targetUpto < target.length) {
        final int targetLabel = target.bytes[target.offset + targetUpto] & 0xFF;
        final FST.Arc<BytesRef> nextArc = fr.index.findTargetArc(targetLabel, arc, getArc(1 + targetUpto), fstReader);
        if (nextArc == null) {
            // The index has no arc for this byte: the ceiling term, if any, must
            // live inside the current block. Scan it.
            validIndexPrefix = currentFrame.prefix;
            currentFrame.scanToFloorFrame(target);
            currentFrame.loadBlock();
            final SeekStatus result = currentFrame.scanToTerm(target, false);
            if (result == SeekStatus.END) {
                // Block exhausted before reaching the target: the ceiling is the
                // next term overall (if one exists).
                term.copyBytes(target);
                termExists = false;
                if (next() != null) {
                    return SeekStatus.NOT_FOUND;
                } else {
                    return SeekStatus.END;
                }
            } else {
                return result;
            }
        } else {
            // Follow the arc, record the matched byte, and accumulate its output.
            term.setByteAt(targetUpto, (byte) targetLabel);
            arc = nextArc;
            assert arc.output() != null;
            if (arc.output() != Lucene40BlockTreeTermsReader.NO_OUTPUT) {
                output = Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.output());
            }
            targetUpto++;
            if (arc.isFinal()) {
                currentFrame = pushFrame(arc, Lucene40BlockTreeTermsReader.FST_OUTPUTS.add(output, arc.nextFinalOutput()), targetUpto);
            }
        }
    }
    // The entire target was matched in the index; scan its final block for the
    // ceiling term (same handling as the dead-end case inside the loop).
    validIndexPrefix = currentFrame.prefix;
    currentFrame.scanToFloorFrame(target);
    currentFrame.loadBlock();
    final SeekStatus result = currentFrame.scanToTerm(target, false);
    if (result == SeekStatus.END) {
        term.copyBytes(target);
        termExists = false;
        if (next() != null) {
            return SeekStatus.NOT_FOUND;
        } else {
            return SeekStatus.END;
        }
    } else {
        return result;
    }
}
272771.0727112elasticsearch
/**
 * Debugging aid: dumps the current seek state (the stack of frames from the
 * root down to {@code currentFrame}) to {@code out}, and cross-checks each
 * frame against the terms index, throwing {@link RuntimeException} if the
 * state is internally inconsistent.
 */
private void printSeekState(PrintStream out) throws IOException {
    if (currentFrame == staticFrame) {
        out.println("  no prior seek");
    } else {
        out.println("  prior seek state:");
        int ord = 0;
        // Frames up to the one whose prefix equals validIndexPrefix were produced
        // by the seek itself ("(seek)"); deeper frames came from next() ("(next)").
        boolean isSeekFrame = true;
        while (true) {
            SegmentTermsEnumFrame f = getFrame(ord);
            assert f != null;
            final BytesRef prefix = new BytesRef(term.get().bytes, 0, f.prefix);
            if (f.nextEnt == -1) {
                // Frame whose block has not been loaded/scanned yet.
                out.println("    frame " + (isSeekFrame ? "(seek)" : "(next)") + " ord=" + ord + " fp=" + f.fp + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "") + " prefixLen=" + f.prefix + " prefix=" + prefix + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")")) + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " code=" + ((f.fp << Lucene40BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS : 0) + (f.isFloor ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR : 0)) + " isLastInFloor=" + f.isLastInFloor + " mdUpto=" + f.metaDataUpto + " tbOrd=" + f.getTermBlockOrd());
            } else {
                // Frame whose block is loaded; also report its scan position (nextEnt).
                out.println("    frame " + (isSeekFrame ? "(seek, loaded)" : "(next, loaded)") + " ord=" + ord + " fp=" + f.fp + (f.isFloor ? (" (fpOrig=" + f.fpOrig + ")") : "") + " prefixLen=" + f.prefix + " prefix=" + prefix + " nextEnt=" + f.nextEnt + (f.nextEnt == -1 ? "" : (" (of " + f.entCount + ")")) + " hasTerms=" + f.hasTerms + " isFloor=" + f.isFloor + " code=" + ((f.fp << Lucene40BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) + (f.hasTerms ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS : 0) + (f.isFloor ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR : 0)) + " lastSubFP=" + f.lastSubFP + " isLastInFloor=" + f.isLastInFloor + " mdUpto=" + f.metaDataUpto + " tbOrd=" + f.getTermBlockOrd());
            }
            if (fr.index != null) {
                // Consistency checks against the index FST.
                assert isSeekFrame == false || f.arc != null : "isSeekFrame=" + isSeekFrame + " f.arc=" + f.arc;
                // The cached arc's label must equal the last byte of this frame's prefix.
                if (f.prefix > 0 && isSeekFrame && f.arc.label() != (term.byteAt(f.prefix - 1) & 0xFF)) {
                    out.println("      broken seek state: arc.label=" + (char) f.arc.label() + " vs term byte=" + (char) (term.byteAt(f.prefix - 1) & 0xFF));
                    throw new RuntimeException("seek state is broken");
                }
                // Every frame prefix must resolve to an output in the index.
                BytesRef output = Util.get(fr.index, prefix);
                if (output == null) {
                    out.println("      broken seek state: prefix is not final in index");
                    throw new RuntimeException("seek state is broken");
                } else if (isSeekFrame && f.isFloor == false) {
                    // For non-floor seek frames the index output must encode exactly
                    // this frame's file pointer and hasTerms/isFloor flags.
                    final ByteArrayDataInput reader = new ByteArrayDataInput(output.bytes, output.offset, output.length);
                    final long codeOrig = reader.readVLong();
                    final long code = (f.fp << Lucene40BlockTreeTermsReader.OUTPUT_FLAGS_NUM_BITS) | (f.hasTerms ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_HAS_TERMS : 0) | (f.isFloor ? Lucene40BlockTreeTermsReader.OUTPUT_FLAG_IS_FLOOR : 0);
                    if (codeOrig != code) {
                        out.println("      broken seek state: output code=" + codeOrig + " doesn't match frame code=" + code);
                        throw new RuntimeException("seek state is broken");
                    }
                }
            }
            if (f == currentFrame) {
                break;
            }
            // Past the validated index prefix, remaining frames were pushed by next().
            if (f.prefix == validIndexPrefix) {
                isSeekFrame = false;
            }
            ord++;
        }
    }
}
272377.082177elasticsearch
/**
 * Table-driven test for {@code Range.areBoundariesInvalid()}.
 *
 * Each row holds the arguments fed to the Range constructor below plus the
 * expected verdict, in order:
 * { value, valueType, lowerBound, lowerType, lowerIncluded,
 *   upperBound, upperType, upperIncluded, expectedInvalid }.
 * Rows cover date/time vs. text boundaries (including "now±..." date math —
 * presumably resolved relative to the current clock, hence the wide
 * now-100y/now+10y margins), inclusive/exclusive combinations, plain string
 * comparisons, and mixed integer/floating-point numeric boundaries.
 */
public void testAreBoundariesInvalid() {
    Object[][] tests = { { d("2021-01-01"), DATETIME, "2021-01-01", randomTextType(), randomBoolean(), "2022-01-01", randomTextType(), randomBoolean(), false }, { d("2021-01-01"), DATETIME, "2022-01-01", randomTextType(), randomBoolean(), "2021-01-01", randomTextType(), randomBoolean(), true }, { d("2021-01-01"), DATETIME, "now-10y", randomTextType(), randomBoolean(), "2022-01-01", randomTextType(), randomBoolean(), false }, { d("2021-01-01"), DATETIME, "2021-01-01", randomTextType(), randomBoolean(), "now+10y", randomTextType(), randomBoolean(), false }, { d("2021-01-01"), DATETIME, "2021-01-01", randomTextType(), randomBoolean(), "now-100y", randomTextType(), randomBoolean(), false }, { d("2021-01-01"), DATETIME, "2021-01-01", randomTextType(), true, "2021-01-01", randomTextType(), true, false }, { d("2021-01-01"), DATETIME, "2021-01-01", randomTextType(), false, "2021-01-01", randomTextType(), true, true }, { d("2021-01-01"), DATETIME, "2021-01-01", randomTextType(), true, "2021-01-01", randomTextType(), false, true }, { d("2021-01-01"), DATETIME, "2021-01-01", randomTextType(), false, "2021-01-01", randomTextType(), false, true }, { d("2021-01-01"), DATETIME, d("2022-01-01"), DATETIME, randomBoolean(), "2021-01-01", randomTextType(), randomBoolean(), true }, { d("2021-01-01"), DATETIME, d("2021-01-01"), DATETIME, false, "2021-01-01", randomTextType(), false, true }, { d("2021-01-01"), DATETIME, d("2021-01-01"), DATETIME, false, d("2021-01-01"), DATETIME, false, true }, { d("2021-01-01"), DATETIME, d("2021-01-01"), DATETIME, true, "2021-01-01", randomTextType(), true, false }, { d("2021-01-01"), DATETIME, d("2021-01-01"), DATETIME, true, d("2021-01-01"), DATETIME, true, false }, { randomAlphaOfLength(10), randomTextType(), d("2021-01-01"), DATETIME, randomBoolean(), "2022-01-01", randomTextType(), randomBoolean(), false }, { randomAlphaOfLength(10), randomTextType(), "2021-01-01", randomTextType(), randomBoolean(), d("2022-01-01"), DATETIME, randomBoolean(), 
false }, { randomAlphaOfLength(10), randomTextType(), d("2022-01-01"), DATETIME, randomBoolean(), "2021-01-01", randomTextType(), randomBoolean(), true }, { randomAlphaOfLength(10), randomTextType(), "2022-01-01", randomTextType(), randomBoolean(), d("2021-01-01"), DATETIME, randomBoolean(), true }, { randomAlphaOfLength(10), randomTextType(), d("2022-01-01"), DATETIME, randomBoolean(), d("2021-01-01"), DATETIME, randomBoolean(), true }, { randomAlphaOfLength(10), randomTextType(), "now-10y", randomTextType(), randomBoolean(), d("2022-01-01"), DATETIME, randomBoolean(), false }, { randomAlphaOfLength(10), randomTextType(), d("2021-01-01"), DATETIME, true, "2021-01-01", randomTextType(), true, false }, { randomAlphaOfLength(10), randomTextType(), d("2021-01-01"), DATETIME, false, "2021-01-01", randomTextType(), true, true }, { randomAlphaOfLength(10), randomTextType(), "2021-01-01", randomTextType(), true, d("2021-01-01"), DATETIME, false, true }, { randomAlphaOfLength(10), randomTextType(), d("2021-01-01"), DATETIME, false, d("2021-01-01"), DATETIME, false, true }, { randomAlphaOfLength(10), randomTextType(), "a", randomTextType(), randomBoolean(), "b", randomTextType(), randomBoolean(), false }, { randomAlphaOfLength(10), randomTextType(), "b", randomTextType(), randomBoolean(), "a", randomTextType(), randomBoolean(), true }, { randomAlphaOfLength(10), randomTextType(), "a", randomTextType(), false, "a", randomTextType(), false, true }, { 10, randomNumericType(), 1, randomNumericType(), randomBoolean(), 10, randomNumericType(), randomBoolean(), false }, { 10, randomNumericType(), 10, randomNumericType(), randomBoolean(), 1, randomNumericType(), randomBoolean(), true }, { 10, randomNumericType(), 1, randomNumericType(), false, 1, randomNumericType(), randomBoolean(), true }, { 10, randomNumericType(), 1, randomNumericType(), randomBoolean(), 1, randomNumericType(), false, true }, { 10, randomNumericType(), 1.0, randomNumericType(), randomBoolean(), 10, 
randomNumericType(), randomBoolean(), false }, { 10, randomNumericType(), 1, randomNumericType(), randomBoolean(), 10.D, randomNumericType(), randomBoolean(), false }, { 10, randomNumericType(), 10.0, randomNumericType(), randomBoolean(), 1, randomNumericType(), randomBoolean(), true } };
    for (int i = 0; i < tests.length; i++) {
        Object[] test = tests[i];
        // Build the Range straight from the row and check the expected verdict;
        // the failure message includes the full row for easy diagnosis.
        Range range = new Range(Source.EMPTY, l(test[0], (DataType) test[1]), l(test[2], (DataType) test[3]), (Boolean) test[4], l(test[5], (DataType) test[6]), (Boolean) test[7], ZoneId.systemDefault());
        assertEquals("failed on test " + i + ": " + Arrays.toString(test), test[8], range.areBoundariesInvalid());
    }
}
273854.277135elasticsearch
/**
 * Verifies {@code SecuritySearchOperationListener.validateReaderContext}: a
 * scroll reader context stores the authentication that created it, and later
 * validation must succeed (propagating the stored indices permissions into the
 * thread context) only when the current authentication matches the stored one,
 * and otherwise fail with {@link SearchContextMissingException} plus an
 * access-denied audit event.
 */
public void testValidateSearchContext() throws Exception {
    final ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class);
    when(shardSearchRequest.scroll()).thenReturn(new Scroll(TimeValue.timeValueMinutes(between(1, 10))));
    try (LegacyReaderContext readerContext = new LegacyReaderContext(new ShardSearchContextId(UUIDs.randomBase64UUID(), 0L), indexService, shard, shard.acquireSearcherSupplier(), shardSearchRequest, Long.MAX_VALUE)) {
        // Seed the reader context with the "owning" authentication (user "test",
        // realm "realm"/"file" on node "node") and its indices permissions.
        readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build(false));
        final IndicesAccessControl indicesAccessControl = mock(IndicesAccessControl.class);
        readerContext.putInContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl);
        MockLicenseState licenseState = mock(MockLicenseState.class);
        when(licenseState.isAllowed(Security.AUDITING_FEATURE)).thenReturn(true);
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext);
        AuditTrail auditTrail = mock(AuditTrail.class);
        AuditTrailService auditTrailService = new AuditTrailService(auditTrail, licenseState);
        SecuritySearchOperationListener listener = new SecuritySearchOperationListener(securityContext, auditTrailService);
        // Scenario 1: identical authentication -> validation passes, stored
        // permissions are copied into the thread context, nothing audited.
        try (StoredContext ignore = threadContext.newStoredContext()) {
            Authentication authentication = AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef("realm", "file", "node")).build(false);
            authentication.writeToContext(threadContext);
            listener.validateReaderContext(readerContext, Empty.INSTANCE);
            assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl));
            verifyNoMoreInteractions(auditTrail);
        }
        // Scenario 2: same user and realm type ("file") but random realm name and
        // node name -> still accepted.
        try (StoredContext ignore = threadContext.newStoredContext()) {
            final String nodeName = randomAlphaOfLengthBetween(1, 8);
            final String realmName = randomAlphaOfLengthBetween(1, 16);
            Authentication authentication = AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef(realmName, "file", nodeName)).build(false);
            authentication.writeToContext(threadContext);
            listener.validateReaderContext(readerContext, Empty.INSTANCE);
            assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl));
            verifyNoMoreInteractions(auditTrail);
        }
        // Scenario 3: same user but a different realm *type* -> rejected: the
        // context id is reported missing, no permissions are propagated, and an
        // access-denied audit event is emitted for the originating action.
        try (StoredContext ignore = threadContext.newStoredContext()) {
            final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8);
            final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16);
            final String type = randomAlphaOfLengthBetween(5, 16);
            Authentication authentication = AuthenticationTestHelper.builder().user(new User("test", "role")).realmRef(new RealmRef(realmName, type, nodeName)).build(false);
            authentication.writeToContext(threadContext);
            threadContext.putTransient(ORIGINATING_ACTION_KEY, "action");
            threadContext.putTransient(AUTHORIZATION_INFO_KEY, (AuthorizationInfo) () -> Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, authentication.getEffectiveSubject().getUser().roles()));
            final InternalScrollSearchRequest request = new InternalScrollSearchRequest();
            SearchContextMissingException expected = expectThrows(SearchContextMissingException.class, () -> listener.validateReaderContext(readerContext, request));
            assertEquals(readerContext.id(), expected.contextId());
            assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), nullValue());
            verify(auditTrail).accessDenied(eq(null), eq(authentication), eq("action"), eq(request), authzInfoRoles(authentication.getEffectiveSubject().getUser().roles()));
        }
        // Scenario 4: run-as authentication whose *effective* user matches the
        // stored one ("test" via a "file" realm) -> accepted.
        try (StoredContext ignore = threadContext.newStoredContext()) {
            final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8);
            final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16);
            final String type = randomAlphaOfLengthBetween(5, 16);
            Authentication authentication = AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(realmName, type, nodeName)).runAs().user(new User("test", "role")).realmRef(new RealmRef(randomAlphaOfLengthBetween(1, 16), "file", nodeName)).build();
            authentication.writeToContext(threadContext);
            threadContext.putTransient(ORIGINATING_ACTION_KEY, "action");
            final InternalScrollSearchRequest request = new InternalScrollSearchRequest();
            listener.validateReaderContext(readerContext, request);
            assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl));
            verifyNoMoreInteractions(auditTrail);
        }
        // Scenario 5: a different user entirely ("authenticated", no run-as) ->
        // rejected and audited, same as scenario 3.
        try (StoredContext ignore = threadContext.newStoredContext()) {
            final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8);
            final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16);
            final String type = randomAlphaOfLengthBetween(5, 16);
            Authentication authentication = AuthenticationTestHelper.builder().user(new User("authenticated", "runas")).realmRef(new RealmRef(realmName, type, nodeName)).build(false);
            authentication.writeToContext(threadContext);
            threadContext.putTransient(ORIGINATING_ACTION_KEY, "action");
            threadContext.putTransient(AUTHORIZATION_INFO_KEY, (AuthorizationInfo) () -> Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, authentication.getEffectiveSubject().getUser().roles()));
            final InternalScrollSearchRequest request = new InternalScrollSearchRequest();
            SearchContextMissingException expected = expectThrows(SearchContextMissingException.class, () -> listener.validateReaderContext(readerContext, request));
            assertEquals(readerContext.id(), expected.contextId());
            assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), nullValue());
            verify(auditTrail).accessDenied(eq(null), eq(authentication), eq("action"), eq(request), authzInfoRoles(authentication.getEffectiveSubject().getUser().roles()));
        }
    }
}
273778.871154elasticsearch
/**
 * Exercises the DATE_ADD processor over every supported datepart unit, each
 * with a positive and a negative amount, against a fixed base timestamp.
 * The base instant 2019-09-03T18:10:37.123456789 rendered in Etc/GMT-10
 * (a +10:00 offset) reads 2019-09-04T04:10:37.123456789+10:00.
 */
public void testAddition() {
    ZoneId zoneId = ZoneId.of("Etc/GMT-10");
    Literal dateTime = l(dateTime(2019, 9, 3, 18, 10, 37, 123456789));
    // Each row: { datepart unit, amount to add, expected rendered result }.
    Object[][] cases = {
        { "years", 10, "2029-09-04T04:10:37.123456789+10:00" },
        { "years", -10, "2009-09-04T04:10:37.123456789+10:00" },
        { "quarters", 10, "2022-03-04T04:10:37.123456789+10:00" },
        { "quarters", -10, "2017-03-04T04:10:37.123456789+10:00" },
        { "month", 20, "2021-05-04T04:10:37.123456789+10:00" },
        { "month", -20, "2018-01-04T04:10:37.123456789+10:00" },
        { "day", 240, "2020-05-01T04:10:37.123456789+10:00" },
        { "day", -120, "2019-05-07T04:10:37.123456789+10:00" },
        { "dayofyear", 478, "2020-12-25T04:10:37.123456789+10:00" },
        { "dayofyear", -478, "2018-05-14T04:10:37.123456789+10:00" },
        { "weeks", 120, "2021-12-22T04:10:37.123456789+10:00" },
        { "weeks", -120, "2017-05-17T04:10:37.123456789+10:00" },
        { "weekday", 12345, "2053-06-22T04:10:37.123456789+10:00" },
        { "weekday", -12345, "1985-11-16T04:10:37.123456789+10:00" },
        { "hours", 7321, "2020-07-05T05:10:37.123456789+10:00" },
        { "hours", -7321, "2018-11-03T03:10:37.123456789+10:00" },
        { "minute", 987654, "2021-07-21T01:04:37.123456789+10:00" },
        { "minute", -987654, "2017-10-18T07:16:37.123456789+10:00" },
        { "seconds", 12987654, "2020-02-01T11:51:31.123456789+10:00" },
        { "seconds", -12987654, "2019-04-06T20:29:43.123456789+10:00" },
        { "ms", 1298765432, "2019-09-19T04:56:42.555456789+10:00" },
        { "ms", -1298765432, "2019-08-20T03:24:31.691456789+10:00" },
        { "mcs", 123987654, "2019-09-04T04:12:41.111110789+10:00" },
        { "mcs", -123987654, "2019-09-04T04:08:33.135802789+10:00" },
        { "nanoseconds", 812398765, "2019-09-04T04:10:37.935855554+10:00" },
        { "nanoseconds", -812398765, "2019-09-04T04:10:36.311058024+10:00" } };
    for (Object[] c : cases) {
        // Build the DATE_ADD pipe for this unit/amount and evaluate it.
        Object result = new DateAdd(Source.EMPTY, l(c[0]), l(c[1]), dateTime, zoneId).makePipe().asProcessor().process(null);
        assertEquals(c[2], toString((ZonedDateTime) result));
    }
}
271524.51190elasticsearch
/**
 * Verifies that {@code LatestChangeCollector.getIndicesToQuery} reports exactly
 * the indices whose per-index sequence ids changed between two consecutive
 * checkpoints, including indices that appear only in the newer checkpoint.
 */
public void testGetIndicesToQuery() {
    LatestChangeCollector collector = new LatestChangeCollector("timestamp");
    // Per-index sequence-id triples; the "*Moved" variants represent the same
    // index after its sequence numbers have advanced.
    long[] seq1 = { 25L, 25L, 25L };
    long[] seq2 = { 324L, 2425L, 2225L };
    long[] seq3 = { 244L, 225L, 2425L };
    long[] seq4 = { 2005L, 2445L, 2425L };
    long[] seq3Moved = { 246L, 255L, 2485L };
    long[] seq4Moved = { 2105L, 2545L, 2525L };
    Map<String, long[]> unchanged = Map.of("index-1", seq1, "index-2", seq2, "index-3", seq3, "index-4", seq4);
    Map<String, long[]> moved3And4 = Map.of("index-1", seq1, "index-2", seq2, "index-3", seq3Moved, "index-4", seq4Moved);
    Map<String, long[]> moved3Only = Map.of("index-1", seq1, "index-2", seq2, "index-3", seq3Moved, "index-4", seq4);
    // Identical sequence ids on both checkpoints: nothing to query.
    assertThat(
        collector.getIndicesToQuery(
            new TransformCheckpoint("t_id", 123513L, 42L, unchanged, 123543L),
            new TransformCheckpoint("t_id", 123456759L, 43L, unchanged, 123456789L)
        ),
        equalTo(Collections.emptySet())
    );
    // index-3 and index-4 advanced.
    assertThat(
        collector.getIndicesToQuery(
            new TransformCheckpoint("t_id", 123513L, 42L, unchanged, 123543L),
            new TransformCheckpoint("t_id", 123456759L, 43L, moved3And4, 123456789L)
        ),
        equalTo(Set.of("index-3", "index-4"))
    );
    // Only index-3 advanced.
    assertThat(
        collector.getIndicesToQuery(
            new TransformCheckpoint("t_id", 123513L, 42L, unchanged, 123543L),
            new TransformCheckpoint("t_id", 123456759L, 43L, moved3Only, 123456789L)
        ),
        equalTo(Collections.singleton("index-3"))
    );
    // Checkpoints covering only index-3/index-4, both advanced.
    assertThat(
        collector.getIndicesToQuery(
            new TransformCheckpoint("t_id", 123513L, 42L, Map.of("index-3", seq3, "index-4", seq4), 123543L),
            new TransformCheckpoint("t_id", 123456759L, 43L, Map.of("index-3", seq3Moved, "index-4", seq4Moved), 123456789L)
        ),
        equalTo(Set.of("index-3", "index-4"))
    );
    // index-1 only exists in the newer checkpoint: it must be queried alongside
    // the indices whose sequence ids moved.
    assertThat(
        collector.getIndicesToQuery(
            new TransformCheckpoint("t_id", 123513L, 42L, Map.of("index-2", seq2, "index-3", seq3, "index-4", seq4), 123543L),
            new TransformCheckpoint("t_id", 123456759L, 43L, moved3And4, 123456789L)
        ),
        equalTo(Set.of("index-1", "index-3", "index-4"))
    );
    // index-1 vanished from the newer checkpoint: only moved indices are reported.
    assertThat(
        collector.getIndicesToQuery(
            new TransformCheckpoint("t_id", 123513L, 42L, unchanged, 123543L),
            new TransformCheckpoint("t_id", 123456759L, 43L, Map.of("index-2", seq2, "index-3", seq3Moved, "index-4", seq4Moved), 123456789L)
        ),
        equalTo(Set.of("index-3", "index-4"))
    );
}
271939.9921138elasticsearch
/**
 * Parses the XContent definition of an array-compare condition for a watch.
 * <p>
 * Expected shape (as enforced below):
 * <pre>
 * { "&lt;arrayPath&gt;": { "path": "...", "&lt;op&gt;": { "value": ..., "quantifier": "..." } } }
 * </pre>
 * The outer field name becomes {@code arrayPath}; inside it, a "path" field
 * selects the value within each array element, and any other field name is
 * resolved as a comparison operator whose object holds the comparison
 * "value" and an optional "quantifier".
 *
 * @param clock   clock handed to the resulting condition
 * @param watchId id of the owning watch, used only in error messages
 * @param parser  parser positioned on the condition's START_OBJECT token
 * @return the parsed condition; {@code path} defaults to "" and
 *         {@code quantifier} defaults to {@code Quantifier.SOME} when absent
 * @throws IOException on underlying parse failure
 * @throws ElasticsearchParseException when the structure deviates from the
 *         shape above or an operator/quantifier name is unknown
 */
public static ArrayCompareCondition parse(Clock clock, String watchId, XContentParser parser) throws IOException {
    // The condition must start as an object; anything else is malformed.
    if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
        throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an object but found [{}] " + "instead", TYPE, watchId, parser.currentToken());
    }
    String arrayPath = null;
    String path = null;
    Op op = null;
    Object value = null;
    Quantifier quantifier = null;
    XContentParser.Token token;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            // Outermost field name is the path to the array being compared.
            arrayPath = parser.currentName();
        } else if (arrayPath == null) {
            // A non-field token before any field name means no array path.
            throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected a field indicating the " + "compared path, but found [{}] instead", TYPE, watchId, token);
        } else if (token == XContentParser.Token.START_OBJECT) {
            // Object under the array path: holds "path" and the operator.
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    if (parser.currentName().equals("path")) {
                        parser.nextToken();
                        path = parser.text();
                    } else {
                        // Any field other than "path" must name an operator.
                        try {
                            op = Op.resolve(parser.currentName());
                        } catch (IllegalArgumentException iae) {
                            throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. unknown comparison " + "operator [{}]", TYPE, watchId, parser.currentName(), iae);
                        }
                        token = parser.nextToken();
                        if (token == XContentParser.Token.START_OBJECT) {
                            // Operator object: carries "value" and/or "quantifier".
                            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                                if (token == XContentParser.Token.FIELD_NAME) {
                                    if (parser.currentName().equals("value")) {
                                        token = parser.nextToken();
                                        // Operators that don't support structured values only
                                        // accept scalar (or null) comparison values.
                                        if (op.supportsStructures() == false && token.isValue() == false && token != XContentParser.Token.VALUE_NULL) {
                                            throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + "compared value for [{}] with operation [{}] must either be a numeric, string, " + "boolean or null value, but found [{}] instead", TYPE, watchId, path, op.name().toLowerCase(Locale.ROOT), token);
                                        }
                                        value = XContentUtils.readValue(parser, token);
                                    } else if (parser.currentName().equals("quantifier")) {
                                        parser.nextToken();
                                        try {
                                            quantifier = Quantifier.resolve(parser.text());
                                        } catch (IllegalArgumentException iae) {
                                            throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + "unknown comparison quantifier [{}]", TYPE, watchId, parser.text(), iae);
                                        }
                                    } else {
                                        throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. " + "expected a field indicating the comparison value or comparison quantifier, but found" + " [{}] instead", TYPE, watchId, parser.currentName());
                                    }
                                } else {
                                    throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected a " + "field indicating the comparison value or comparison quantifier, but found [{}] instead", TYPE, watchId, token);
                                }
                            }
                        } else {
                            throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an object " + "for field [{}] but found [{}] instead", TYPE, watchId, op.id(), token);
                        }
                    }
                } else {
                    throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected a field indicating" + " the compared path or a comparison operator, but found [{}] instead", TYPE, watchId, token);
                }
            }
        } else {
            // NOTE(review): this message interpolates 'path', but the field that
            // failed here is the one under 'arrayPath' — confirm which was intended.
            throw new ElasticsearchParseException("could not parse [{}] condition for watch [{}]. expected an object for field [{}] " + "but found [{}] instead", TYPE, watchId, path, token);
        }
    }
    // Defaults: empty element path, and SOME as the quantifier.
    if (path == null) {
        path = "";
    }
    if (quantifier == null) {
        quantifier = Quantifier.SOME;
    }
    return new ArrayCompareCondition(arrayPath, path, op, value, quantifier, clock);
}
272275.6624128gwt
/**
 * Parses a JavaScript primary expression — function expression, array or
 * object literal, parenthesized expression, name, number, string, regexp,
 * or a PRIMARY keyword token — and returns its parse-tree node built via
 * {@code nf}. As it consumes tokens it mirrors them into the decompiler
 * source buffer through the sourceAdd* calls.
 *
 * @param ts the token stream to read from
 * @return the parse node, or null after a reported error / ERROR token
 * @throws IOException on underlying stream failure
 * @throws JavaScriptException propagated from nested expression parsing
 */
private Object primaryExpr(TokenStream ts) throws IOException, JavaScriptException {
    int tt;
    Object pn;
    // Allow the scanner to treat '/' as the start of a regexp literal here,
    // since a primary expression position permits one; restore afterwards.
    ts.flags |= ts.TSF_REGEXP;
    tt = ts.getToken();
    ts.flags &= ~ts.TSF_REGEXP;
    switch(tt) {
        case TokenStream.FUNCTION:
            // Function expression (true = expression context).
            return function(ts, true);
        case TokenStream.LB:
            {
                // Array literal: [ elem, , elem, ... ]
                sourceAdd((char) ts.LB);
                pn = nf.createLeaf(ts.ARRAYLIT);
                ts.flags |= ts.TSF_REGEXP;
                boolean matched = ts.matchToken(ts.RB);
                ts.flags &= ~ts.TSF_REGEXP;
                if (!matched) {
                    boolean first = true;
                    do {
                        ts.flags |= ts.TSF_REGEXP;
                        tt = ts.peekToken();
                        ts.flags &= ~ts.TSF_REGEXP;
                        if (!first)
                            sourceAdd((char) ts.COMMA);
                        else
                            first = false;
                        if (tt == ts.RB) {
                            // Trailing comma before ']' ends the literal.
                            break;
                        }
                        if (tt == ts.COMMA) {
                            // Elision: a bare comma contributes 'undefined'.
                            nf.addChildToBack(pn, nf.createLeaf(ts.PRIMARY, ts.UNDEFINED));
                        } else {
                            nf.addChildToBack(pn, assignExpr(ts, false));
                        }
                    } while (ts.matchToken(ts.COMMA));
                    mustMatchToken(ts, ts.RB, "msg.no.bracket.arg");
                }
                sourceAdd((char) ts.RB);
                return nf.createArrayLiteral(pn);
            }
        case TokenStream.LC:
            {
                // Object literal: { prop: value, ... } where prop may be a
                // name, a string, or a number.
                pn = nf.createLeaf(ts.OBJLIT);
                sourceAdd((char) ts.LC);
                if (!ts.matchToken(ts.RC)) {
                    boolean first = true;
                    commaloop: do {
                        Object property;
                        if (!first)
                            sourceAdd((char) ts.COMMA);
                        else
                            first = false;
                        tt = ts.getToken();
                        switch(tt) {
                            case TokenStream.NAME:
                                String name = ts.getString();
                                sourceAddString(ts.NAME, name);
                                property = nf.createName(ts.getString());
                                break;
                            case TokenStream.STRING:
                                String s = ts.getString();
                                sourceAddString(ts.NAME, s);
                                property = nf.createString(ts.getString());
                                break;
                            case TokenStream.NUMBER:
                                double n = ts.getNumber();
                                sourceAddNumber(n);
                                property = nf.createNumber(n);
                                break;
                            case TokenStream.RC:
                                // Trailing comma before '}': push token back
                                // and finish the literal.
                                ts.ungetToken(tt);
                                break commaloop;
                            default:
                                reportError(ts, "msg.bad.prop");
                                break commaloop;
                        }
                        mustMatchToken(ts, ts.COLON, "msg.no.colon.prop");
                        // OBJLIT marker stands in for ':' in the source buffer.
                        sourceAdd((char) ts.OBJLIT);
                        // Children alternate: property, value, property, value...
                        nf.addChildToBack(pn, property);
                        nf.addChildToBack(pn, assignExpr(ts, false));
                    } while (ts.matchToken(ts.COMMA));
                    mustMatchToken(ts, ts.RC, "msg.no.brace.prop");
                }
                sourceAdd((char) ts.RC);
                return nf.createObjectLiteral(pn);
            }
        case TokenStream.LP:
            // Parenthesized expression.
            sourceAdd((char) ts.LP);
            pn = expr(ts, false);
            // NOTE(review): ts.GWT appears to stand in for the closing-paren
            // token in this GWT-modified TokenStream — confirm against
            // TokenStream's token definitions.
            sourceAdd((char) ts.GWT);
            mustMatchToken(ts, ts.GWT, "msg.no.paren");
            return pn;
        case TokenStream.NAME:
            String name = ts.getString();
            sourceAddString(ts.NAME, name);
            return nf.createName(name);
        case TokenStream.NUMBER:
            double n = ts.getNumber();
            sourceAddNumber(n);
            return nf.createNumber(n);
        case TokenStream.STRING:
            String s = ts.getString();
            sourceAddString(ts.STRING, s);
            return nf.createString(s);
        case TokenStream.REGEXP:
            {
                // Regexp literal: reconstruct /re/flags for the source buffer.
                String flags = ts.regExpFlags;
                ts.regExpFlags = null;
                String re = ts.getString();
                sourceAddString(ts.REGEXP, '/' + re + '/' + flags);
                return nf.createRegExp(re, flags);
            }
        case TokenStream.PRIMARY:
            // Keyword primaries (e.g. this/true/false/null) carried as an op.
            sourceAdd((char) ts.PRIMARY);
            sourceAdd((char) ts.getOp());
            return nf.createLeaf(ts.PRIMARY, ts.getOp());
        case TokenStream.RESERVED:
            reportError(ts, "msg.reserved.id");
            break;
        case TokenStream.ERROR:
            // Scanner already reported the error; just fall through.
            break;
        default:
            reportError(ts, "msg.syntax");
            break;
    }
    // Reached only after an error path above.
    return null;
}
272741.071172gwt
/**
 * Verifies increment/decrement semantics on boxed types: ++/-- auto-unbox,
 * apply the arithmetic, and re-box into a NEW wrapper (the original box is
 * never mutated in place), for Byte, Character, Short, Integer, Long, Float
 * and Double. Also checks pre/post-increment ordering when the operand is an
 * array element addressed by a boxed Integer index.
 */
public void testIncrDecr() {
    // Post-increment on a boxed Byte, plus compound indexing: ary[idx++]++
    // must bump ary[2] and idx exactly once each, in that order.
    {
        Byte originalBoxedByte = boxedByte;
        assertEquals(unboxedByte, (byte) boxedByte++);
        assertEquals(unboxedByte + 1, (byte) boxedByte);
        boxedByte = originalBoxedByte;
        Integer[] ary = new Integer[] { 0, 10, 20, 30, 40, 50 };
        Integer idx = 2;
        assertEquals(20, (int) ary[idx++]++);
        assertEquals(21, (int) ary[2]);
        assertEquals(3, (int) idx);
        assertEquals(40, (int) ary[idx += 1]);
        assertEquals(4, (int) idx);
    }
    // Byte: all four operators produce a new box with the adjusted value.
    {
        Byte originalBoxedByte = boxedByte;
        boxedByte++;
        assertNotSame("Boxed byte modified in place", boxedByte, originalBoxedByte);
        assertEquals(unboxedByte + 1, (byte) boxedByte);
        boxedByte = originalBoxedByte;
        ++boxedByte;
        assertNotSame("Boxed byte modified in place", boxedByte, originalBoxedByte);
        assertEquals(unboxedByte + 1, (byte) boxedByte);
        boxedByte = originalBoxedByte;
        boxedByte--;
        assertNotSame("Boxed byte modified in place", boxedByte, originalBoxedByte);
        assertEquals(unboxedByte - 1, (byte) boxedByte);
        boxedByte = originalBoxedByte;
        --boxedByte;
        assertNotSame("Boxed byte modified in place", boxedByte, originalBoxedByte);
        assertEquals(unboxedByte - 1, (byte) boxedByte);
        boxedByte = originalBoxedByte;
    }
    // Character: same four-operator pattern.
    {
        Character originalBoxedChar = boxedChar;
        boxedChar++;
        assertNotSame("Boxed character modified in place", boxedChar, originalBoxedChar);
        assertEquals(unboxedChar + 1, (char) boxedChar);
        boxedChar = originalBoxedChar;
        ++boxedChar;
        assertNotSame("Boxed character modified in place", boxedChar, originalBoxedChar);
        assertEquals(unboxedChar + 1, (char) boxedChar);
        boxedChar = originalBoxedChar;
        boxedChar--;
        assertNotSame("Boxed character modified in place", boxedChar, originalBoxedChar);
        assertEquals(unboxedChar - 1, (char) boxedChar);
        boxedChar = originalBoxedChar;
        --boxedChar;
        assertNotSame("Boxed character modified in place", boxedChar, originalBoxedChar);
        assertEquals(unboxedChar - 1, (char) boxedChar);
        boxedChar = originalBoxedChar;
    }
    // Short: same four-operator pattern.
    {
        Short originalBoxedShort = boxedShort;
        boxedShort++;
        assertNotSame("Boxed short modified in place", boxedShort, originalBoxedShort);
        assertEquals(unboxedShort + 1, (short) boxedShort);
        boxedShort = originalBoxedShort;
        ++boxedShort;
        assertNotSame("Boxed short modified in place", boxedShort, originalBoxedShort);
        assertEquals(unboxedShort + 1, (short) boxedShort);
        boxedShort = originalBoxedShort;
        boxedShort--;
        assertNotSame("Boxed short modified in place", boxedShort, originalBoxedShort);
        assertEquals(unboxedShort - 1, (short) boxedShort);
        boxedShort = originalBoxedShort;
        --boxedShort;
        assertNotSame("Boxed short modified in place", boxedShort, originalBoxedShort);
        assertEquals(unboxedShort - 1, (short) boxedShort);
        boxedShort = originalBoxedShort;
    }
    // Integer: same four-operator pattern.
    {
        Integer originalBoxedInt = boxedInt;
        boxedInt++;
        assertNotSame("Boxed int modified in place", boxedInt, originalBoxedInt);
        assertEquals(unboxedInt + 1, (int) boxedInt);
        boxedInt = originalBoxedInt;
        ++boxedInt;
        assertNotSame("Boxed int modified in place", boxedInt, originalBoxedInt);
        assertEquals(unboxedInt + 1, (int) boxedInt);
        boxedInt = originalBoxedInt;
        boxedInt--;
        assertNotSame("Boxed int modified in place", boxedInt, originalBoxedInt);
        assertEquals(unboxedInt - 1, (int) boxedInt);
        boxedInt = originalBoxedInt;
        --boxedInt;
        assertNotSame("Boxed int modified in place", boxedInt, originalBoxedInt);
        assertEquals(unboxedInt - 1, (int) boxedInt);
        boxedInt = originalBoxedInt;
    }
    // Long: same four-operator pattern.
    {
        Long originalBoxedLong = boxedLong;
        boxedLong++;
        assertNotSame("Boxed long modified in place", boxedLong, originalBoxedLong);
        assertEquals(unboxedLong + 1, (long) boxedLong);
        boxedLong = originalBoxedLong;
        ++boxedLong;
        assertNotSame("Boxed long modified in place", boxedLong, originalBoxedLong);
        assertEquals(unboxedLong + 1, (long) boxedLong);
        boxedLong = originalBoxedLong;
        boxedLong--;
        assertNotSame("Boxed long modified in place", boxedLong, originalBoxedLong);
        assertEquals(unboxedLong - 1, (long) boxedLong);
        boxedLong = originalBoxedLong;
        --boxedLong;
        assertNotSame("Boxed long modified in place", boxedLong, originalBoxedLong);
        assertEquals(unboxedLong - 1, (long) boxedLong);
        boxedLong = originalBoxedLong;
    }
    // Float: same four-operator pattern.
    {
        Float originalBoxedFloat = boxedFloat;
        boxedFloat++;
        assertNotSame("Boxed float modified in place", boxedFloat, originalBoxedFloat);
        assertEquals(unboxedFloat + 1, (float) boxedFloat);
        boxedFloat = originalBoxedFloat;
        ++boxedFloat;
        assertNotSame("Boxed float modified in place", boxedFloat, originalBoxedFloat);
        assertEquals(unboxedFloat + 1, (float) boxedFloat);
        boxedFloat = originalBoxedFloat;
        boxedFloat--;
        assertNotSame("Boxed float modified in place", boxedFloat, originalBoxedFloat);
        assertEquals(unboxedFloat - 1, (float) boxedFloat);
        boxedFloat = originalBoxedFloat;
        --boxedFloat;
        assertNotSame("Boxed float modified in place", boxedFloat, originalBoxedFloat);
        assertEquals(unboxedFloat - 1, (float) boxedFloat);
        boxedFloat = originalBoxedFloat;
    }
    // Double: same four-operator pattern.
    {
        Double originalBoxedDouble = boxedDouble;
        boxedDouble++;
        assertNotSame("Boxed double modified in place", boxedDouble, originalBoxedDouble);
        assertEquals(unboxedDouble + 1, (double) boxedDouble);
        boxedDouble = originalBoxedDouble;
        ++boxedDouble;
        assertNotSame("Boxed double modified in place", boxedDouble, originalBoxedDouble);
        assertEquals(unboxedDouble + 1, (double) boxedDouble);
        boxedDouble = originalBoxedDouble;
        boxedDouble--;
        assertNotSame("Boxed double modified in place", boxedDouble, originalBoxedDouble);
        assertEquals(unboxedDouble - 1, (double) boxedDouble);
        boxedDouble = originalBoxedDouble;
        --boxedDouble;
        assertNotSame("Boxed double modified in place", boxedDouble, originalBoxedDouble);
        assertEquals(unboxedDouble - 1, (double) boxedDouble);
        boxedDouble = originalBoxedDouble;
    }
}
272918.8719125hadoop
/**
 * Launches async-profiler against the requested process and replies with a
 * page that auto-redirects to the profiler output once the run completes.
 * <p>
 * Flow: validate instrumentation access and the ASYNC_PROFILER_HOME config,
 * resolve the target pid and profiling options from query parameters, then —
 * if no profiler run is already in flight — acquire {@code profilerLock},
 * build the profiler command line, launch it asynchronously, and respond
 * with 202 Accepted plus a Refresh header pointing at the output file.
 *
 * @param req  request carrying pid/duration/event/output and tuning params
 * @param resp response: 202 on start, 401 on auth failure, 500 on any error
 * @throws IOException if writing the response fails
 */
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) throws IOException {
    // Only callers allowed instrumentation access may trigger profiling.
    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), req, resp)) {
        resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
        setResponseHeader(resp);
        resp.getWriter().write("Unauthorized: Instrumentation access is not allowed!");
        return;
    }
    // The servlet shells out to the profiler script under asyncProfilerHome;
    // bail out early if that location was never configured.
    if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
        resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        setResponseHeader(resp);
        resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + "environment is properly configured.");
        return;
    }
    pid = getInteger(req, "pid", pid);
    if (pid == null) {
        resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        setResponseHeader(resp);
        resp.getWriter().write("'pid' query parameter unspecified or unable to determine PID of current process.");
        return;
    }
    // Optional tuning parameters; null/false means "omit the flag".
    final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
    final Output output = getOutput(req);
    final Event event = getEvent(req);
    final Long interval = getLong(req, "interval");
    final Integer jstackDepth = getInteger(req, "jstackdepth", null);
    final Long bufsize = getLong(req, "bufsize");
    final boolean thread = req.getParameterMap().containsKey("thread");
    final boolean simple = req.getParameterMap().containsKey("simple");
    final Integer width = getInteger(req, "width", null);
    final Integer height = getInteger(req, "height", null);
    final Double minwidth = getMinWidth(req);
    final boolean reverse = req.getParameterMap().containsKey("reverse");
    if (process == null || !process.isAlive()) {
        try {
            int lockTimeoutSecs = 3;
            if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
                try {
                    // Locale.ROOT keeps enum names mapping to the ASCII flags
                    // the profiler script expects, regardless of the JVM's
                    // default locale (e.g. Turkish dotless-i).
                    File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + event.name().toLowerCase(java.util.Locale.ROOT) + "-" + ID_GEN.incrementAndGet() + "." + output.name().toLowerCase(java.util.Locale.ROOT));
                    List<String> cmd = new ArrayList<>();
                    cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
                    cmd.add("-e");
                    cmd.add(event.getInternalName());
                    cmd.add("-d");
                    cmd.add(String.valueOf(duration));
                    cmd.add("-o");
                    cmd.add(output.name().toLowerCase(java.util.Locale.ROOT));
                    cmd.add("-f");
                    cmd.add(outputFile.getAbsolutePath());
                    if (interval != null) {
                        cmd.add("-i");
                        cmd.add(interval.toString());
                    }
                    if (jstackDepth != null) {
                        cmd.add("-j");
                        cmd.add(jstackDepth.toString());
                    }
                    if (bufsize != null) {
                        cmd.add("-b");
                        cmd.add(bufsize.toString());
                    }
                    if (thread) {
                        cmd.add("-t");
                    }
                    if (simple) {
                        cmd.add("-s");
                    }
                    if (width != null) {
                        cmd.add("--width");
                        cmd.add(width.toString());
                    }
                    if (height != null) {
                        cmd.add("--height");
                        cmd.add(height.toString());
                    }
                    if (minwidth != null) {
                        cmd.add("--minwidth");
                        cmd.add(minwidth.toString());
                    }
                    if (reverse) {
                        cmd.add("--reverse");
                    }
                    cmd.add(pid.toString());
                    // In tests the command is built and reported but never run.
                    if (!isTestRun) {
                        process = ProcessUtils.runCmdAsync(cmd);
                    }
                    setResponseHeader(resp);
                    resp.setStatus(HttpServletResponse.SC_ACCEPTED);
                    String relativeUrl = "/prof-output-hadoop/" + outputFile.getName();
                    resp.getWriter().write("Started [" + event.getInternalName() + "] profiling. This page will automatically redirect to " + relativeUrl + " after " + duration + " seconds. " + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." + "\n\nCommand:\n" + Joiner.on(" ").join(cmd));
                    // Refresh header sends the browser to the output once the
                    // profiling run (plus optional extra delay) has elapsed.
                    int refreshDelay = getInteger(req, "refreshDelay", 0);
                    resp.setHeader("Refresh", (duration + refreshDelay) + ";" + relativeUrl);
                    resp.getWriter().flush();
                } finally {
                    profilerLock.unlock();
                }
            } else {
                setResponseHeader(resp);
                resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                resp.getWriter().write("Unable to acquire lock. Another instance of profiler might be running.");
                LOG.warn("Unable to acquire lock in {} seconds. Another instance of profiler might be" + " running.", lockTimeoutSecs);
            }
        } catch (InterruptedException e) {
            LOG.warn("Interrupted while acquiring profile lock.", e);
            resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        }
    } else {
        setResponseHeader(resp);
        resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        resp.getWriter().write("Another instance of profiler is already running.");
    }
}
272621.3722123hadoop
/**
 * Handles the "bind" CLI action: parses one of the -inet, -webui or -rest
 * option groups from {@code args}, builds a ServiceRecord carrying the
 * matching external endpoint, and binds it at the supplied registry path,
 * overwriting any existing record.
 *
 * @param args full command-line arguments; args[1] selects the endpoint
 *             kind and the remaining options describe it, with exactly one
 *             trailing path argument
 * @return 0 on success, a usage-error code or -1 on failure
 */
public int bind(String[] args) {
    // Shared "api" option plus the per-kind options and their groupings.
    Option apiOption = Option.builder("api").argName("api").hasArg().desc("api").build();
    Option restOption = Option.builder("rest").argName("rest").hasArg().desc("rest Option").build();
    Option webuiOption = Option.builder("webui").argName("webui").hasArg().desc("webui Option").build();
    Option inetFlag = Option.builder("inet").argName("inet").desc("inet Option").build();
    Option portOption = Option.builder("p").argName("port").hasArg().desc("port to listen on [9999]").build();
    Option hostOption = Option.builder("h").argName("host").hasArg().desc("host name").build();
    Options inetOptions = new Options();
    inetOptions.addOption(inetFlag);
    inetOptions.addOption(portOption);
    inetOptions.addOption(hostOption);
    inetOptions.addOption(apiOption);
    Options webuiOptions = new Options();
    webuiOptions.addOption(webuiOption);
    webuiOptions.addOption(apiOption);
    Options restOptions = new Options();
    restOptions.addOption(restOption);
    restOptions.addOption(apiOption);
    CommandLineParser cliParser = new GnuParser();
    ServiceRecord record = new ServiceRecord();
    CommandLine parsed;
    if (args.length <= 1) {
        return usageError("Invalid syntax ", BIND_USAGE);
    }
    if (args[1].equals("-inet")) {
        try {
            parsed = cliParser.parse(inetOptions, args);
        } catch (ParseException exp) {
            return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
        }
        // host, port and api are all mandatory for an inet endpoint.
        if (!(parsed.hasOption("inet") && parsed.hasOption("p") && parsed.hasOption("h") && parsed.hasOption("api"))) {
            return usageError("Missing options: must have host, port and api", BIND_USAGE);
        }
        int portNum;
        try {
            portNum = Integer.parseInt(parsed.getOptionValue("p"));
        } catch (NumberFormatException exp) {
            return usageError("Invalid Port - int required" + exp.getMessage(), BIND_USAGE);
        }
        String hostName = parsed.getOptionValue("h");
        String api = parsed.getOptionValue("api");
        record.addExternalEndpoint(inetAddrEndpoint(api, ProtocolTypes.PROTOCOL_HADOOP_IPC, hostName, portNum));
    } else if (args[1].equals("-webui")) {
        try {
            parsed = cliParser.parse(webuiOptions, args);
        } catch (ParseException exp) {
            return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
        }
        if (!(parsed.hasOption("webui") && parsed.hasOption("api"))) {
            return usageError("Missing options: must have value for uri and api", BIND_USAGE);
        }
        URI webUri;
        try {
            webUri = new URI(parsed.getOptionValue("webui"));
        } catch (URISyntaxException e) {
            return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
        }
        record.addExternalEndpoint(webEndpoint(parsed.getOptionValue("api"), webUri));
    } else if (args[1].equals("-rest")) {
        try {
            parsed = cliParser.parse(restOptions, args);
        } catch (ParseException exp) {
            return usageError("Invalid syntax " + exp.getMessage(), BIND_USAGE);
        }
        if (!(parsed.hasOption("rest") && parsed.hasOption("api"))) {
            return usageError("Missing options: must have value for uri and api", BIND_USAGE);
        }
        URI restUri;
        try {
            restUri = new URI(parsed.getOptionValue("rest"));
        } catch (URISyntaxException e) {
            return usageError("Invalid URI: " + e.getMessage(), BIND_USAGE);
        }
        record.addExternalEndpoint(restEndpoint(parsed.getOptionValue("api"), restUri));
    } else {
        return usageError("Invalid syntax", BIND_USAGE);
    }
    // After the options, exactly one positional argument (the registry path)
    // must remain alongside the kind selector.
    @SuppressWarnings("unchecked")
    List<String> remaining = parsed.getArgList();
    if (remaining.size() != 2) {
        return usageError("bind requires exactly one path argument", BIND_USAGE);
    }
    if (!validatePath(remaining.get(1))) {
        return -1;
    }
    try {
        registry.bind(remaining.get(1), record, BindFlags.OVERWRITE);
        return 0;
    } catch (Exception e) {
        syserr.println(analyzeException("bind", e, remaining));
    }
    return -1;
}
272466.4624118hadoop
/**
 * Receives a file from the given URL's stream and writes it to each of the
 * supplied local paths, optionally computing an MD5 digest of the received
 * bytes and throttling the transfer.
 *
 * @param url source URL, used only in log and error messages
 * @param localPaths destination files; directory entries are resolved
 *                   against fsImageName (may be null to just consume the
 *                   stream, e.g. for checksumming)
 * @param dstStorage if an instance of StorageErrorReporter, receives
 *                   per-file error reports when a destination can't be opened
 * @param getChecksum whether to MD5-digest the received bytes
 * @param advertisedSize expected byte count; mismatch after a complete read
 *                       raises an IOException
 * @param advertisedDigest expected MD5, checked when non-null and
 *                         getChecksum is set
 * @param fsImageName filename used inside directory destinations
 * @param stream input stream to read from (always closed before return)
 * @param throttler optional bandwidth throttler
 * @return the computed MD5 digest, or null when getChecksum is false
 * @throws IOException on read/write failure, size mismatch, digest mismatch,
 *                     or when no destination could be opened
 */
public static MD5Hash receiveFile(String url, List<File> localPaths, Storage dstStorage, boolean getChecksum, long advertisedSize, MD5Hash advertisedDigest, String fsImageName, InputStream stream, DataTransferThrottler throttler) throws IOException {
    long startTime = Time.monotonicNow();
    // Maps each output stream back to its file, for per-file fsync logging.
    Map<FileOutputStream, File> streamPathMap = new HashMap<>();
    StringBuilder xferStats = new StringBuilder();
    double xferCombined = 0;
    // Resolve directory destinations to concrete files named fsImageName.
    if (localPaths != null) {
        List<File> newLocalPaths = new ArrayList<>();
        for (File localPath : localPaths) {
            if (localPath.isDirectory()) {
                if (fsImageName == null) {
                    throw new IOException("No filename header provided by server");
                }
                newLocalPaths.add(new File(localPath, fsImageName));
            } else {
                newLocalPaths.add(localPath);
            }
        }
        localPaths = newLocalPaths;
    }
    long received = 0;
    MessageDigest digester = null;
    // Wrap the stream so every byte read is folded into the MD5 digest.
    if (getChecksum) {
        digester = MD5Hash.getDigester();
        stream = new DigestInputStream(stream, digester);
    }
    boolean finishedReceiving = false;
    int num = 1;
    List<FileOutputStream> outputStreams = Lists.newArrayList();
    try {
        if (localPaths != null) {
            // Open every destination; individual failures are reported and
            // tolerated as long as at least one destination opens.
            for (File f : localPaths) {
                try {
                    if (f.exists()) {
                        LOG.warn("Overwriting existing file " + f + " with file downloaded from " + url);
                    }
                    FileOutputStream fos = new FileOutputStream(f);
                    outputStreams.add(fos);
                    streamPathMap.put(fos, f);
                } catch (IOException ioe) {
                    LOG.warn("Unable to download file " + f, ioe);
                    if (dstStorage != null && (dstStorage instanceof StorageErrorReporter)) {
                        ((StorageErrorReporter) dstStorage).reportErrorOnFile(f);
                    }
                }
            }
            if (outputStreams.isEmpty()) {
                throw new IOException("Unable to download to any storage directory");
            }
        }
        // Fan each read chunk out to every destination, throttling as we go.
        byte[] buf = new byte[IO_FILE_BUFFER_SIZE];
        while (num > 0) {
            num = stream.read(buf);
            if (num > 0) {
                received += num;
                for (FileOutputStream fos : outputStreams) {
                    fos.write(buf, 0, num);
                }
                if (throttler != null) {
                    throttler.throttle(num);
                }
            }
        }
        finishedReceiving = true;
        // 0.001s floor avoids divide-by-zero in the KB/s rate below.
        double xferSec = Math.max(((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
        long xferKb = received / 1024;
        xferCombined += xferSec;
        xferStats.append(String.format(" The file download took %.2fs at %.2f KB/s.", xferSec, xferKb / xferSec));
    } finally {
        stream.close();
        // Force each destination to disk (fsync) before closing, timing the
        // flush for the transfer-stats message.
        for (FileOutputStream fos : outputStreams) {
            long flushStartTime = Time.monotonicNow();
            fos.getChannel().force(true);
            fos.close();
            double writeSec = Math.max(((float) (Time.monotonicNow() - flushStartTime)) / 1000.0, 0.001);
            xferCombined += writeSec;
            xferStats.append(String.format(" Synchronous (fsync) write to disk of " + streamPathMap.get(fos).getAbsolutePath() + " took %.2fs.", writeSec));
        }
        // Clean up partial downloads: either the read loop died, or it
        // completed but delivered a different byte count than advertised.
        if (!finishedReceiving) {
            deleteTmpFiles(localPaths);
        }
        if (finishedReceiving && received != advertisedSize) {
            deleteTmpFiles(localPaths);
            throw new IOException("File " + url + " received length " + received + " is not of the advertised size " + advertisedSize + ". Fsimage name: " + fsImageName + " lastReceived: " + num);
        }
    }
    xferStats.insert(0, String.format("Combined time for file download and" + " fsync to all disks took %.2fs.", xferCombined));
    LOG.info(xferStats.toString());
    // Verify the computed digest against the server-advertised one, if any.
    if (digester != null) {
        MD5Hash computedDigest = new MD5Hash(digester.digest());
        if (advertisedDigest != null && !computedDigest.equals(advertisedDigest)) {
            deleteTmpFiles(localPaths);
            throw new IOException("File " + url + " computed digest " + computedDigest + " does not match advertised digest " + advertisedDigest);
        }
        return computedDigest;
    } else {
        return null;
    }
}
273022.834145hadoop
/**
 * Verifies that identical ACLs are de-duplicated into a single shared
 * {@code AclFeature} whose reference count equals the number of inodes using
 * it, that the unique-feature set shrinks back when references drop to zero,
 * and that reference counts are rebuilt (doubled/tripled here, because each
 * restart re-loads the namespace on top of the retained static map) across
 * NameNode restarts and a saveNamespace cycle.
 *
 * NOTE(review): every assertion below depends on the cluster state left by
 * the preceding step — the blocks must not be reordered.
 */
public void testDeDuplication() throws Exception {
    // Start from a pristine cluster and an empty global unique-ACL map so the
    // absolute sizes asserted below are deterministic.
    shutdown();
    AclStorage.getUniqueAclFeatures().clear();
    startCluster();
    setUp();
    // Running count of unique ACL features we expect the global map to hold.
    int currentSize = 0;
    Path p1 = new Path("/testDeduplication");
    {
        // A never-before-seen ACL spec must add exactly one unique feature.
        List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "testdeduplicateuser", ALL), aclEntry(DEFAULT, GROUP, "testdeduplicategroup", ALL));
        fs.mkdirs(p1);
        fs.modifyAclEntries(p1, aclSpec);
        assertEquals("One more ACL feature should be unique", currentSize + 1, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
        currentSize++;
    }
    Path child1 = new Path(p1, "child1");
    AclFeature child1AclFeature;
    {
        // child1 inherits p1's default ACL, which materializes as a new
        // (access) AclFeature distinct from p1's default-entry feature.
        fs.mkdirs(child1);
        assertEquals("One more ACL feature should be unique", currentSize + 1, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
        child1AclFeature = getAclFeature(child1, cluster);
        assertEquals("Reference count should be 1", 1, child1AclFeature.getRefCount());
        currentSize++;
    }
    Path child2 = new Path(p1, "child2");
    {
        // child2 inherits the same ACL as child1, so no new unique feature —
        // the existing instance is shared and its refcount bumps to 2.
        fs.mkdirs(child2);
        assertEquals("existing AclFeature should be re-used", currentSize, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
        AclFeature child2AclFeature = getAclFeature(child1, cluster);
        assertSame("Same Aclfeature should be re-used", child1AclFeature, child2AclFeature);
        assertEquals("Reference count should be 2", 2, child2AclFeature.getRefCount());
    }
    {
        // Modifying child1's ACL detaches it from the shared feature onto a
        // new one; removing the added entry re-attaches it to the original.
        List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "user1", ALL));
        fs.modifyAclEntries(child1, aclSpec);
        AclFeature modifiedAclFeature = getAclFeature(child1, cluster);
        assertEquals("Old Reference count should be 1", 1, child1AclFeature.getRefCount());
        assertEquals("New Reference count should be 1", 1, modifiedAclFeature.getRefCount());
        AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER).setName("user1").build();
        fs.removeAclEntries(child1, Lists.newArrayList(aclEntry));
        assertEquals("Old Reference count should be 2 again", 2, child1AclFeature.getRefCount());
        assertEquals("New Reference count should be 0", 0, modifiedAclFeature.getRefCount());
    }
    {
        // Removing child2's ACL releases one reference on the shared feature.
        fs.removeAcl(child2);
        assertEquals("Reference count should be 1", 1, child1AclFeature.getRefCount());
    }
    {
        // Deleting the last inode using the feature drops it to zero.
        fs.delete(child1, true);
        assertEquals("Reference count should be 0", 0, child1AclFeature.getRefCount());
    }
    Path file1 = new Path(p1, "file1");
    Path file2 = new Path(p1, "file2");
    AclFeature fileAclFeature;
    {
        // Same de-duplication behaviour for files: two files created under
        // the same default ACL share one AclFeature instance.
        fs.create(file1).close();
        fileAclFeature = getAclFeature(file1, cluster);
        assertEquals("Reference count should be 1", 1, fileAclFeature.getRefCount());
        fs.create(file2).close();
        assertEquals("Reference count should be 2", 2, fileAclFeature.getRefCount());
    }
    {
        // Modify-then-revert on a file mirrors the directory case above.
        List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "user1", ALL));
        fs.modifyAclEntries(file1, aclSpec);
        AclFeature modifiedFileAcl = getAclFeature(file1, cluster);
        assertEquals("Old Reference count should be 1", 1, fileAclFeature.getRefCount());
        assertEquals("New Reference count should be 1", 1, modifiedFileAcl.getRefCount());
        AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER).setName("user1").build();
        fs.removeAclEntries(file1, Lists.newArrayList(aclEntry));
        assertEquals("Old Reference count should be 2", 2, fileAclFeature.getRefCount());
        assertEquals("New Reference count should be 0", 0, modifiedFileAcl.getRefCount());
    }
    {
        // Once a feature's refcount hits zero it is evicted; re-creating the
        // same ACL must therefore yield a different instance.
        fs.delete(file2, true);
        assertEquals("Reference count should be decreased on delete of the file", 1, fileAclFeature.getRefCount());
        fs.delete(file1, true);
        assertEquals("Reference count should be decreased on delete of the file", 0, fileAclFeature.getRefCount());
        fs.create(file1).close();
        AclFeature newFileAclFeature = getAclFeature(file1, cluster);
        assertNotSame("Instance should be different on reference count 0", fileAclFeature, newFileAclFeature);
        fileAclFeature = newFileAclFeature;
    }
    // Snapshot the refcounts so we can compare across restarts.
    Map<AclFeature, Integer> restartRefCounter = new HashMap<>();
    List<AclFeature> entriesBeforeRestart = AclStorage.getUniqueAclFeatures().getEntries();
    {
        for (AclFeature aclFeature : entriesBeforeRestart) {
            restartRefCounter.put(aclFeature, aclFeature.getRefCount());
        }
        // Restart replays the edit log into the same static unique-ACL map,
        // so each feature's refcount is expected to double.
        cluster.restartNameNode(true);
        List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures().getEntries();
        assertEquals("Entries before and after should be same", entriesBeforeRestart, entriesAfterRestart);
        for (AclFeature aclFeature : entriesAfterRestart) {
            int before = restartRefCounter.get(aclFeature);
            assertEquals("ReferenceCount After Restart should be doubled", before * 2, aclFeature.getRefCount());
        }
    }
    {
        // saveNamespace + restart loads from the fsimage instead of the edit
        // log; counts accumulate once more (tripled relative to baseline).
        cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        cluster.getNameNodeRpc().saveNamespace(0, 0);
        cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        cluster.restartNameNode(true);
        List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures().getEntries();
        assertEquals("Entries before and after should be same", entriesBeforeRestart, entriesAfterRestart);
        for (AclFeature aclFeature : entriesAfterRestart) {
            int before = restartRefCounter.get(aclFeature);
            assertEquals("ReferenceCount After 2 Restarts should be tripled", before * 3, aclFeature.getRefCount());
        }
    }
}
273336.391147hadoop
/**
 * Verifies ACL-feature de-duplication in the presence of snapshots: a
 * snapshot of a directory tree shares the live inodes' {@code AclFeature}
 * instances (bumping the refcount only for the snapshot root, whose inode is
 * copied), modifications after a snapshot split live and snapshot copies onto
 * separate features, and deleting snapshotted subtrees keeps the snapshot's
 * ACL references alive until the snapshot itself is deleted.
 *
 * NOTE(review): assertions are strictly order-dependent; the blocks build on
 * each other's cluster state and must not be reordered.
 */
public void testDeDuplication() throws Exception {
    // Baseline size of the global unique-ACL map before this test runs.
    int startSize = AclStorage.getUniqueAclFeatures().getUniqueElementsSize();
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "testdeduplicateuser", ALL), aclEntry(ACCESS, GROUP, "testdeduplicategroup", ALL));
    hdfs.mkdirs(path);
    hdfs.modifyAclEntries(path, aclSpec);
    assertEquals("One more ACL feature should be unique", startSize + 1, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
    Path subdir = new Path(path, "sub-dir");
    hdfs.mkdirs(subdir);
    Path file = new Path(path, "file");
    hdfs.create(file).close();
    AclFeature aclFeature;
    {
        // Snapshotting the ACL'd directory itself copies its inode, so the
        // shared feature gains a second reference.
        aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
        assertEquals("Reference count should be one before snapshot", 1, aclFeature.getRefCount());
        Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
        AclFeature snapshotAclFeature = FSAclBaseTest.getAclFeature(snapshotPath, cluster);
        assertSame(aclFeature, snapshotAclFeature);
        assertEquals("Reference count should be increased", 2, snapshotAclFeature.getRefCount());
    }
    {
        deleteSnapshotWithAclAndVerify(aclFeature, path, startSize);
    }
    {
        // A child directory inside the snapshot is not copied, so its ACL
        // feature's refcount stays at 1 even though the snapshot sees it.
        hdfs.modifyAclEntries(subdir, aclSpec);
        aclFeature = FSAclBaseTest.getAclFeature(subdir, cluster);
        assertEquals("Reference count should be 1", 1, aclFeature.getRefCount());
        Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
        Path subdirInSnapshot = new Path(snapshotPath, "sub-dir");
        AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(subdirInSnapshot, cluster);
        assertSame(aclFeature, snapshotAcl);
        assertEquals("Reference count should remain same", 1, aclFeature.getRefCount());
        deleteSnapshotWithAclAndVerify(aclFeature, subdir, startSize);
    }
    {
        // Same behaviour for a file inside the snapshot.
        hdfs.modifyAclEntries(file, aclSpec);
        aclFeature = FSAclBaseTest.getAclFeature(file, cluster);
        assertEquals("Reference count should be 1", 1, aclFeature.getRefCount());
        Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
        Path fileInSnapshot = new Path(snapshotPath, file.getName());
        AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(fileInSnapshot, cluster);
        assertSame(aclFeature, snapshotAcl);
        assertEquals("Reference count should remain same", 1, aclFeature.getRefCount());
        deleteSnapshotWithAclAndVerify(aclFeature, file, startSize);
    }
    {
        // Modifying the snapshot root's ACL after snapshotting splits the
        // live dir and the snapshot onto different AclFeature instances.
        hdfs.modifyAclEntries(path, aclSpec);
        Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
        AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(snapshotPath, cluster);
        aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
        assertEquals("Before modification same ACL should be referenced twice", 2, aclFeature.getRefCount());
        List<AclEntry> newAcl = Lists.newArrayList(aclEntry(ACCESS, USER, "testNewUser", ALL));
        hdfs.modifyAclEntries(path, newAcl);
        aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
        AclFeature snapshotAclPostModification = FSAclBaseTest.getAclFeature(snapshotPath, cluster);
        assertSame(snapshotAcl, snapshotAclPostModification);
        assertNotSame(aclFeature, snapshotAclPostModification);
        assertEquals("Old ACL feature reference count should be same", 1, snapshotAcl.getRefCount());
        assertEquals("New ACL feature reference should be used", 1, aclFeature.getRefCount());
        deleteSnapshotWithAclAndVerify(aclFeature, path, startSize);
    }
    {
        // Same split-on-modify behaviour for a subdirectory in a snapshot.
        hdfs.modifyAclEntries(subdir, aclSpec);
        Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
        Path subdirInSnapshot = new Path(snapshotPath, "sub-dir");
        AclFeature snapshotAclFeature = FSAclBaseTest.getAclFeature(subdirInSnapshot, cluster);
        List<AclEntry> newAcl = Lists.newArrayList(aclEntry(ACCESS, USER, "testNewUser", ALL));
        hdfs.modifyAclEntries(subdir, newAcl);
        aclFeature = FSAclBaseTest.getAclFeature(subdir, cluster);
        assertNotSame(aclFeature, snapshotAclFeature);
        assertEquals("Reference count should remain same", 1, snapshotAclFeature.getRefCount());
        assertEquals("New AclFeature should be used", 1, aclFeature.getRefCount());
        deleteSnapshotWithAclAndVerify(aclFeature, subdir, startSize);
    }
    {
        // And for a file in a snapshot.
        hdfs.modifyAclEntries(file, aclSpec);
        Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
        Path fileInSnapshot = new Path(snapshotPath, file.getName());
        AclFeature snapshotAclFeature = FSAclBaseTest.getAclFeature(fileInSnapshot, cluster);
        List<AclEntry> newAcl = Lists.newArrayList(aclEntry(ACCESS, USER, "testNewUser", ALL));
        hdfs.modifyAclEntries(file, newAcl);
        aclFeature = FSAclBaseTest.getAclFeature(file, cluster);
        assertNotSame(aclFeature, snapshotAclFeature);
        assertEquals("Reference count should remain same", 1, snapshotAclFeature.getRefCount());
        deleteSnapshotWithAclAndVerify(aclFeature, file, startSize);
    }
    {
        // Deleting a snapshotted subtree must keep its ACL references alive
        // for the snapshot; only deleting the snapshot releases them.
        hdfs.delete(path, true);
        Path dir = new Path(subdir, "dir");
        hdfs.mkdirs(dir);
        hdfs.modifyAclEntries(dir, aclSpec);
        file = new Path(subdir, "file");
        hdfs.create(file).close();
        // NOTE(review): this mutates the shared aclSpec list in place, so the
        // file's ACL intentionally differs from the dir's from here on.
        aclSpec.add(aclEntry(ACCESS, USER, "testNewUser", ALL));
        hdfs.modifyAclEntries(file, aclSpec);
        AclFeature fileAcl = FSAclBaseTest.getAclFeature(file, cluster);
        AclFeature dirAcl = FSAclBaseTest.getAclFeature(dir, cluster);
        Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
        Path dirInSnapshot = new Path(snapshotPath, "sub-dir/dir");
        AclFeature snapshotDirAclFeature = FSAclBaseTest.getAclFeature(dirInSnapshot, cluster);
        Path fileInSnapshot = new Path(snapshotPath, "sub-dir/file");
        AclFeature snapshotFileAclFeature = FSAclBaseTest.getAclFeature(fileInSnapshot, cluster);
        assertSame(fileAcl, snapshotFileAclFeature);
        assertSame(dirAcl, snapshotDirAclFeature);
        hdfs.delete(subdir, true);
        assertEquals("Original ACLs references should be maintained for snapshot", 1, snapshotFileAclFeature.getRefCount());
        assertEquals("Original ACLs references should be maintained for snapshot", 1, snapshotDirAclFeature.getRefCount());
        hdfs.deleteSnapshot(path, snapshotName);
        assertEquals("ACLs should be deleted from snapshot", startSize, AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
    }
}
274189.81139hadoop
/**
 * Renames a directory ({@code foo}, containing {@code bar1}) and a file
 * ({@code bar}) back and forth across three snapshottable roots
 * (/dir1 -&gt; /dir2 -&gt; /dir3 -&gt; /dir2 -&gt; /dir1), verifying after each hop
 * that: the originals remain visible only through /dir1's snapshot s1 (with
 * their original replication), the intermediate snapshots s2/s3 never expose
 * them (they were taken before the renames landed there), the fsimage stays
 * loadable across restarts, and the INodeReference/WithCount bookkeeping for
 * the rename chain collapses correctly once the live copies are deleted.
 *
 * NOTE(review): the rename/setReplication/restart sequence is the test —
 * statements must not be reordered.
 */
public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
    // --- Setup: three snapshottable roots, foo/bar1 and bar under /dir1. ---
    final Path sdir1 = new Path("/dir1");
    final Path sdir2 = new Path("/dir2");
    final Path sdir3 = new Path("/dir3");
    hdfs.mkdirs(sdir1);
    hdfs.mkdirs(sdir2);
    hdfs.mkdirs(sdir3);
    final Path foo_dir1 = new Path(sdir1, "foo");
    final Path bar1_dir1 = new Path(foo_dir1, "bar1");
    final Path bar2_dir1 = new Path(sdir1, "bar");
    DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
    DFSTestUtil.createFile(hdfs, bar2_dir1, BLOCKSIZE, REPL, SEED);
    // Snapshots taken BEFORE any rename: only s1 captures foo/bar.
    SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
    SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");
    // --- Hop 1: /dir1 -> /dir2, then mutate replication of the movers. ---
    final Path foo_dir2 = new Path(sdir2, "foo");
    hdfs.rename(foo_dir1, foo_dir2);
    final Path bar2_dir2 = new Path(sdir2, "bar");
    hdfs.rename(bar2_dir1, bar2_dir2);
    restartClusterAndCheckImage(true);
    final Path bar1_dir2 = new Path(foo_dir2, "bar1");
    hdfs.setReplication(bar1_dir2, REPL_1);
    hdfs.setReplication(bar2_dir2, REPL_1);
    final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo/bar1");
    final Path bar2_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "bar");
    final Path bar1_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo/bar1");
    final Path bar2_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "bar");
    // s1 still sees the originals; s2 predates the rename so it sees nothing.
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    // Snapshot copies keep REPL; live copies carry the new REPL_1.
    FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir2);
    assertEquals(REPL_1, statusBar1.getReplication());
    FileStatus statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_dir2);
    assertEquals(REPL_1, statusBar2.getReplication());
    // --- Hop 2: /dir2 -> /dir3, mutate again. ---
    final Path foo_dir3 = new Path(sdir3, "foo");
    hdfs.rename(foo_dir2, foo_dir3);
    final Path bar2_dir3 = new Path(sdir3, "bar");
    hdfs.rename(bar2_dir2, bar2_dir3);
    restartClusterAndCheckImage(true);
    final Path bar1_dir3 = new Path(foo_dir3, "bar1");
    hdfs.setReplication(bar1_dir3, REPL_2);
    hdfs.setReplication(bar2_dir3, REPL_2);
    final Path bar1_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3", "foo/bar1");
    final Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3", "bar");
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    assertFalse(hdfs.exists(bar1_s3));
    assertFalse(hdfs.exists(bar2_s3));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir3);
    assertEquals(REPL_2, statusBar1.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_dir3);
    assertEquals(REPL_2, statusBar2.getReplication());
    // --- Hop 3: back /dir3 -> /dir2, restore replication to REPL. ---
    hdfs.rename(foo_dir3, foo_dir2);
    hdfs.rename(bar2_dir3, bar2_dir2);
    restartClusterAndCheckImage(true);
    hdfs.setReplication(bar1_dir2, REPL);
    hdfs.setReplication(bar2_dir2, REPL);
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    assertFalse(hdfs.exists(bar1_s3));
    assertFalse(hdfs.exists(bar2_s3));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar1 = hdfs.getFileStatus(bar1_dir2);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_dir2);
    assertEquals(REPL, statusBar2.getReplication());
    // --- Hop 4: back to /dir1; inspect INodeReference bookkeeping. ---
    hdfs.rename(foo_dir2, foo_dir1);
    hdfs.rename(bar2_dir2, bar2_dir1);
    // Each renamed inode sits behind a WithCount with 2 refs: the live path
    // and the s1 snapshot copy.
    INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString()).asReference();
    INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
    assertEquals(2, fooWithCount.getReferenceCount());
    INodeDirectory foo = fooWithCount.asDirectory();
    // Only one diff should remain, anchored at snapshot s1.
    assertEquals(1, foo.getDiffs().asList().size());
    INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
    Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
    assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
    INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
    assertEquals(1, bar1.getDiffs().asList().size());
    assertEquals(s1.getId(), bar1.getDiffs().getLastSnapshotId());
    INodeReference barRef = fsdir.getINode4Write(bar2_dir1.toString()).asReference();
    INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
    assertEquals(2, barWithCount.getReferenceCount());
    INodeFile bar = barWithCount.asFile();
    assertEquals(1, bar.getDiffs().asList().size());
    assertEquals(s1.getId(), bar.getDiffs().getLastSnapshotId());
    restartClusterAndCheckImage(true);
    // --- Deletion: live copies go away, s1 copies survive with 1 ref. ---
    hdfs.delete(foo_dir1, true);
    restartClusterAndCheckImage(true);
    hdfs.delete(bar2_dir1, true);
    restartClusterAndCheckImage(true);
    assertTrue(hdfs.exists(bar1_s1));
    assertTrue(hdfs.exists(bar2_s1));
    assertFalse(hdfs.exists(bar1_s2));
    assertFalse(hdfs.exists(bar2_s2));
    assertFalse(hdfs.exists(bar1_s3));
    assertFalse(hdfs.exists(bar2_s3));
    assertFalse(hdfs.exists(foo_dir1));
    assertFalse(hdfs.exists(bar1_dir1));
    assertFalse(hdfs.exists(bar2_dir1));
    statusBar1 = hdfs.getFileStatus(bar1_s1);
    assertEquals(REPL, statusBar1.getReplication());
    statusBar2 = hdfs.getFileStatus(bar2_s1);
    assertEquals(REPL, statusBar2.getReplication());
    final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
    fooRef = fsdir.getINode(foo_s1.toString()).asReference();
    fooWithCount = (WithCount) fooRef.getReferredINode();
    assertEquals(1, fooWithCount.getReferenceCount());
    barRef = fsdir.getINode(bar2_s1.toString()).asReference();
    barWithCount = (WithCount) barRef.getReferredINode();
    assertEquals(1, barWithCount.getReferenceCount());
}
273973.821151hadoop
/**
 * Exercises the NameNode/SecondaryNameNode storage-directory configurations:
 * (1) a single combined name+edits dir, (2) combined plus separate name and
 * edits dirs, (3) separate dirs only, and (4) combined restored alongside the
 * separate dirs after wiping the combined dir's "current" contents. After
 * each phase a file created in the previous phase must still be readable
 * (proving the image/edits carried over) and a checkpoint must succeed.
 *
 * Fixes over the previous revision:
 * - the fourth configuration now uses {@code nameAndEdits.getPath()} like
 *   every other line instead of relying on {@code File#toString()};
 * - cluster/secondary shutdown is chained in nested try/finally so a failure
 *   in {@code FileSystem#close()} no longer leaks the cluster or the
 *   SecondaryNameNode.
 *
 * @throws Exception on any cluster, filesystem, or checkpoint failure
 */
public void testNameEditsConfigs() throws Exception {
    Path file1 = new Path("TestNameEditsConfigs1");
    Path file2 = new Path("TestNameEditsConfigs2");
    Path file3 = new Path("TestNameEditsConfigs3");
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    Configuration conf = null;
    FileSystem fileSys = null;
    final File newNameDir = new File(base_dir, "name");
    final File newEditsDir = new File(base_dir, "edits");
    final File nameAndEdits = new File(base_dir, "name_and_edits");
    final File checkpointNameDir = new File(base_dir, "secondname");
    final File checkpointEditsDir = new File(base_dir, "secondedits");
    final File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
    // Every storage dir that should end up with identical current/ contents.
    ImmutableList<File> allCurrentDirs = ImmutableList.of(new File(nameAndEdits, "current"), new File(newNameDir, "current"), new File(newEditsDir, "current"), new File(checkpointNameAndEdits, "current"), new File(checkpointNameDir, "current"), new File(checkpointEditsDir, "current"));
    // Subset of dirs that hold fsimage files (edits-only dirs excluded).
    ImmutableList<File> imageCurrentDirs = ImmutableList.of(new File(nameAndEdits, "current"), new File(newNameDir, "current"), new File(checkpointNameAndEdits, "current"), new File(checkpointNameDir, "current"));
    // Phase 1: single combined name+edits directory.
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file1, replication);
        secondary.doCheckpoint();
    } finally {
        shutdownAll(fileSys, cluster, secondary);
    }
    // Phase 2: combined dir plus a fresh separate name dir and edits dir;
    // restart without formatting so the existing image is picked up.
    conf = new HdfsConfiguration();
    assertTrue(newNameDir.mkdir());
    assertTrue(newEditsDir.mkdir());
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() + "," + newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() + "," + newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(fileSys.exists(file1));
        checkFile(fileSys, file1, replication);
        cleanupFile(fileSys, file1);
        DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file2, replication);
        secondary.doCheckpoint();
    } finally {
        shutdownAll(fileSys, cluster, secondary);
    }
    // After the checkpoint all storage dirs must agree (modulo VERSION).
    FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.of("VERSION"));
    FSImageTestUtil.assertSameNewestImage(imageCurrentDirs);
    // Phase 3: separate name and edits dirs only (combined dir dropped).
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        // file1 was removed in phase 2; file2 must have survived.
        assertTrue(!fileSys.exists(file1));
        assertTrue(fileSys.exists(file2));
        checkFile(fileSys, file2, replication);
        cleanupFile(fileSys, file2);
        DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file3, replication);
        secondary.doCheckpoint();
    } finally {
        shutdownAll(fileSys, cluster, secondary);
    }
    // Name-only dirs hold images but no edits; edits-only dirs the reverse.
    checkImageAndEditsFilesExistence(newNameDir, true, false);
    checkImageAndEditsFilesExistence(newEditsDir, false, true);
    checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
    checkImageAndEditsFilesExistence(checkpointEditsDir, false, true);
    // Phase 4: wipe the combined dirs' current/ contents and restore them
    // alongside the separate dirs — they must be re-populated on startup.
    assertTrue(FileUtil.fullyDelete(new File(nameAndEdits, "current")));
    assertTrue(FileUtil.fullyDelete(new File(checkpointNameAndEdits, "current")));
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() + "," + newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() + "," + newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        assertTrue(!fileSys.exists(file2));
        assertTrue(fileSys.exists(file3));
        checkFile(fileSys, file3, replication);
        secondary.doCheckpoint();
    } finally {
        shutdownAll(fileSys, cluster, secondary);
    }
    // The restored combined dirs must again contain both image and edits.
    checkImageAndEditsFilesExistence(nameAndEdits, true, true);
    checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
}

/**
 * Closes the file system and shuts down the cluster and the secondary
 * NameNode, guaranteeing each shutdown runs even if an earlier one throws.
 *
 * @param fileSys   open file system handle to close
 * @param cluster   running mini cluster to shut down
 * @param secondary running secondary NameNode to shut down
 * @throws IOException if closing {@code fileSys} fails
 */
private static void shutdownAll(FileSystem fileSys, MiniDFSCluster cluster, SecondaryNameNode secondary) throws IOException {
    try {
        fileSys.close();
    } finally {
        try {
            cluster.shutdown();
        } finally {
            secondary.shutdown();
        }
    }
}
273385.696140hadoop
/**
 * Exercises namespace (inode-count) quotas: setting quotas on nested
 * directories, mkdir failing with {@link NSQuotaExceededException} once a
 * quota is full, renames into a full directory failing without corrupting
 * either source or destination, and quota accounting following directories
 * as they are renamed or deleted.
 *
 * Fix over the previous revision: every {@code assertEquals} had its
 * arguments reversed (actual first); they now follow the JUnit
 * {@code assertEquals(expected, actual)} convention so failure messages
 * report the values correctly. Pass/fail behavior is unchanged.
 *
 * NOTE(review): each quota check depends on the directory counts left by
 * the preceding operations — do not reorder.
 */
public void testNamespaceCommands() throws Exception {
    final Path parent = new Path(PathUtils.getTestDir(getClass()).getPath(), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    // Layout: nqdir0/qdir1/qdir20/nqdir30 ("q" dirs get quotas below).
    assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/qdir1/qdir20/nqdir30")));
    // quotaDir1 holds itself + qdir20 + nqdir30 = 3 dirs; quota 6.
    final Path quotaDir1 = new Path(parent, "nqdir0/qdir1");
    dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
    ContentSummary c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(3, c.getDirectoryCount());
    assertEquals(6, c.getQuota());
    // quotaDir2 holds itself + nqdir30 = 2 dirs; quota 7.
    final Path quotaDir2 = new Path(parent, "nqdir0/qdir1/qdir20");
    dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(2, c.getDirectoryCount());
    assertEquals(7, c.getQuota());
    // quotaDir3 is new, quota 2 — room for itself plus exactly one child.
    final Path quotaDir3 = new Path(parent, "nqdir0/qdir1/qdir21");
    assertTrue(dfs.mkdirs(quotaDir3));
    dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
    c = dfs.getContentSummary(quotaDir3);
    compareQuotaUsage(c, dfs, quotaDir3);
    assertEquals(1, c.getDirectoryCount());
    assertEquals(2, c.getQuota());
    // First child fills quotaDir3's quota.
    Path tempPath = new Path(quotaDir3, "nqdir32");
    assertTrue(dfs.mkdirs(tempPath));
    c = dfs.getContentSummary(quotaDir3);
    compareQuotaUsage(c, dfs, quotaDir3);
    assertEquals(2, c.getDirectoryCount());
    assertEquals(2, c.getQuota());
    // Second child must be rejected with NSQuotaExceededException.
    tempPath = new Path(quotaDir3, "nqdir33");
    boolean hasException = false;
    try {
        assertFalse(dfs.mkdirs(tempPath));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // Counts must be unchanged by the failed mkdir.
    c = dfs.getContentSummary(quotaDir3);
    compareQuotaUsage(c, dfs, quotaDir3);
    assertEquals(2, c.getDirectoryCount());
    assertEquals(2, c.getQuota());
    // quotaDir2 still has headroom: one more child succeeds.
    tempPath = new Path(quotaDir2, "nqdir31");
    assertTrue(dfs.mkdirs(tempPath));
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(3, c.getDirectoryCount());
    assertEquals(7, c.getQuota());
    // quotaDir1 is now exactly full (6 of 6).
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(6, c.getDirectoryCount());
    assertEquals(6, c.getQuota());
    // Any further mkdir under quotaDir1's subtree must fail.
    tempPath = new Path(quotaDir2, "nqdir33");
    hasException = false;
    try {
        assertFalse(dfs.mkdirs(tempPath));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // Rename within the same quota root is allowed (net count unchanged).
    tempPath = new Path(quotaDir2, "nqdir30");
    dfs.rename(new Path(quotaDir3, "nqdir32"), tempPath);
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(4, c.getDirectoryCount());
    assertEquals(7, c.getQuota());
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(6, c.getDirectoryCount());
    assertEquals(6, c.getQuota());
    // Renaming onto a full quota dir fails; source must survive intact.
    hasException = false;
    try {
        assertFalse(dfs.rename(tempPath, quotaDir3));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    assertTrue(dfs.exists(tempPath));
    assertFalse(dfs.exists(new Path(quotaDir3, "nqdir30")));
    hasException = false;
    try {
        assertFalse(dfs.rename(tempPath, new Path(quotaDir3, "nqdir32")));
    } catch (QuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    assertTrue(dfs.exists(tempPath));
    assertFalse(dfs.exists(new Path(quotaDir3, "nqdir32")));
    // Moving out of the quota subtree frees two slots under each quota dir.
    assertTrue(dfs.rename(tempPath, new Path(parent, "nqdir0")));
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(2, c.getDirectoryCount());
    assertEquals(7, c.getQuota());
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(4, c.getDirectoryCount());
    assertEquals(6, c.getQuota());
    // A 2-dir subtree cannot move into the remaining 2-slot headroom plus
    // itself... attempt must fail; then move quotaDir3 under quotaDir2.
    assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/nqdir30/nqdir33")));
    hasException = false;
    try {
        assertFalse(dfs.rename(new Path(parent, "nqdir0/nqdir30"), tempPath));
    } catch (NSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    assertTrue(dfs.rename(quotaDir3, quotaDir2));
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(4, c.getDirectoryCount());
    assertEquals(6, c.getQuota());
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(3, c.getDirectoryCount());
    assertEquals(7, c.getQuota());
    // The moved dir keeps its own quota (2) at its new location.
    tempPath = new Path(quotaDir2, "qdir21");
    c = dfs.getContentSummary(tempPath);
    compareQuotaUsage(c, dfs, tempPath);
    assertEquals(1, c.getDirectoryCount());
    assertEquals(2, c.getQuota());
    // Deleting it releases its count from both ancestors' quotas.
    dfs.delete(tempPath, true);
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(2, c.getDirectoryCount());
    assertEquals(7, c.getQuota());
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(3, c.getDirectoryCount());
    assertEquals(6, c.getQuota());
    // Finally, move a 3-dir subtree back in — it now fits.
    assertTrue(dfs.rename(new Path(parent, "nqdir0/nqdir30"), quotaDir2));
    c = dfs.getContentSummary(quotaDir2);
    compareQuotaUsage(c, dfs, quotaDir2);
    assertEquals(5, c.getDirectoryCount());
    assertEquals(7, c.getQuota());
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(6, c.getDirectoryCount());
    assertEquals(6, c.getQuota());
}
273262.3822113hadoop
 /**
  * Randomized stress test for {@code Diff}.
  *
  * Builds a "previous" list of {@code startSize} INodes, copies it to
  * "current", then applies {@code numModifications} random
  * create/delete/modify operations to "current", recording each operation
  * in one of five sequential Diff objects.  It then verifies:
  * (1) chaining {@code apply2Previous} over all diffs reproduces "current"
  *     from "previous", and {@code apply2Current} in reverse order
  *     reproduces "previous" from "current";
  * (2) the same two properties hold after all diffs are merged into one via
  *     {@code combinePosterior};
  * (3) for every INode key ever generated, {@code accessPrevious} /
  *     {@code accessCurrent} on the combined diff agree with a direct
  *     binary search of the corresponding list.
  *
  * On any mismatch the relevant lists are dumped to System.out and an
  * AssertionError is thrown.
  */
 void runDiffTest(int startSize, int numModifications) {
    final int width = findWidth(startSize + numModifications);
    System.out.println("\nstartSize=" + startSize + ", numModifications=" + numModifications + ", width=" + width);
    // Initial state: "previous" and "current" start out identical.
    final List<INode> previous = new ArrayList<INode>();
    int n = 0;
    for (; n < startSize; n++) {
        previous.add(newINode(n, width));
    }
    final List<INode> current = new ArrayList<INode>(previous);
    final List<Diff<byte[], INode>> diffs = new ArrayList<Diff<byte[], INode>>();
    for (int j = 0; j < 5; j++) {
        diffs.add(new Diff<byte[], INode>());
    }
    for (int m = 0; m < numModifications; m++) {
        // Spread the modifications evenly across the five diffs.
        final int j = m * diffs.size() / numModifications;
        // 1 = create, 2 = delete, 3 = modify; only create is valid on an
        // empty list, so force operation 1 in that case.
        final int nextOperation = current.isEmpty() ? 1 : RANDOM.nextInt(3) + 1;
        switch(nextOperation) {
            case 1:
                {
                    final INode i = newINode(n++, width);
                    create(i, current, diffs.get(j));
                    break;
                }
            case 2:
                {
                    final INode i = current.get(RANDOM.nextInt(current.size()));
                    delete(i, current, diffs.get(j));
                    break;
                }
            case 3:
                {
                    final INode i = current.get(RANDOM.nextInt(current.size()));
                    modify(i, current, diffs.get(j));
                    break;
                }
        }
    }
    {
        // Forward: previous + diff[0..4] must equal current.
        List<INode> c = previous;
        for (int i = 0; i < diffs.size(); i++) {
            c = diffs.get(i).apply2Previous(c);
        }
        if (!hasIdenticalElements(current, c)) {
            System.out.println("previous = " + previous);
            System.out.println();
            System.out.println("current  = " + current);
            System.out.println("c        = " + c);
            throw new AssertionError("current and c are not identical.");
        }
        // Backward: current - diff[4..0] must equal previous.
        List<INode> p = current;
        for (int i = diffs.size() - 1; i >= 0; i--) {
            p = diffs.get(i).apply2Current(p);
        }
        if (!hasIdenticalElements(previous, p)) {
            System.out.println("previous = " + previous);
            System.out.println("p        = " + p);
            System.out.println();
            System.out.println("current  = " + current);
            throw new AssertionError("previous and p are not identical.");
        }
    }
    // Merge all five diffs into diffs.get(0) and re-check both directions
    // against the single combined diff.
    final Diff<byte[], INode> combined = diffs.get(0);
    for (int i = 1; i < diffs.size(); i++) {
        combined.combinePosterior(diffs.get(i), null);
    }
    {
        final List<INode> c = combined.apply2Previous(previous);
        if (!hasIdenticalElements(current, c)) {
            System.out.println("previous = " + previous);
            System.out.println();
            System.out.println("current  = " + current);
            System.out.println("c        = " + c);
            throw new AssertionError("current and c are not identical.");
        }
        final List<INode> p = combined.apply2Current(current);
        if (!hasIdenticalElements(previous, p)) {
            System.out.println("previous = " + previous);
            System.out.println("p        = " + p);
            System.out.println();
            System.out.println("current  = " + current);
            throw new AssertionError("previous and p are not identical.");
        }
    }
    {
        // Point lookups: every key ever generated (indices 0..n-1) must
        // resolve identically via the combined diff and via direct search.
        for (int m = 0; m < n; m++) {
            final INode inode = newINode(m, width);
            {
                // accessPrevious falls back to "current" when the key is not
                // recorded in the diff; the result must match a direct search
                // of "previous".
                final Container<INode> r = combined.accessPrevious(inode.getKey());
                final INode computed;
                if (r != null) {
                    computed = r.getElement();
                } else {
                    final int i = Diff.search(current, inode.getKey());
                    computed = i < 0 ? null : current.get(i);
                }
                final int j = Diff.search(previous, inode.getKey());
                final INode expected = j < 0 ? null : previous.get(j);
                Assert.assertTrue(computed == expected);
            }
            {
                // Symmetric check for accessCurrent.
                final Container<INode> r = combined.accessCurrent(inode.getKey());
                final INode computed;
                if (r != null) {
                    computed = r.getElement();
                } else {
                    final int i = Diff.search(previous, inode.getKey());
                    computed = i < 0 ? null : previous.get(i);
                }
                final int j = Diff.search(current, inode.getKey());
                final INode expected = j < 0 ? null : current.get(j);
                Assert.assertTrue(computed == expected);
            }
        }
    }
}
273183.5415122hadoop
/**
 * Verifies NetworkTopology#sortByDistance: with a fixed random seed the
 * ordering is deterministic (local node first, then node-local, then
 * rack-local, then off-rack), ties within the same distance are shuffled,
 * and non-seeded sorts actually randomize the first location.  Also covers
 * sortByDistanceUsingNetworkLocation for non-datanode (rack-only and
 * datacenter-only) readers.
 *
 * BUG FIX: the second randomization loop previously reused the
 * {@code foundRandom} flag left {@code true} by the first loop, so its
 * assertion could never fail; the flag is now reset before the loop.
 */
public void testSortByDistance() throws Exception {
    // Reader is dataNodes[0]: expect itself first, then same-node, then rack.
    DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
    testNodes[0] = dataNodes[1];
    testNodes[1] = dataNodes[2];
    testNodes[2] = dataNodes[0];
    cluster.setRandomSeed(0xDEADBEEF);
    cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
    assertTrue(testNodes[0] == dataNodes[0]);
    assertTrue(testNodes[1] == dataNodes[1]);
    assertTrue(testNodes[2] == dataNodes[2]);
    // Partial sort (activeLen = length - 2): only the first three entries are
    // ordered; equal-distance nodes may appear in either order.
    DatanodeDescriptor[] dtestNodes = new DatanodeDescriptor[5];
    dtestNodes[0] = dataNodes[8];
    dtestNodes[1] = dataNodes[12];
    dtestNodes[2] = dataNodes[11];
    dtestNodes[3] = dataNodes[9];
    dtestNodes[4] = dataNodes[10];
    cluster.setRandomSeed(0xDEADBEEF);
    cluster.sortByDistance(dataNodes[8], dtestNodes, dtestNodes.length - 2);
    assertTrue(dtestNodes[0] == dataNodes[8]);
    assertTrue(dtestNodes[1] != dtestNodes[2]);
    assertTrue(dtestNodes[1] == dataNodes[11] || dtestNodes[1] == dataNodes[12]);
    assertTrue(dtestNodes[2] == dataNodes[11] || dtestNodes[2] == dataNodes[12]);
    assertTrue(dtestNodes[3] != dtestNodes[4]);
    assertTrue(dtestNodes[3] == dataNodes[9] || dtestNodes[3] == dataNodes[10]);
    assertTrue(dtestNodes[4] == dataNodes[9] || dtestNodes[4] == dataNodes[10]);
    // Local node, rack-local node, off-rack node.
    testNodes[0] = dataNodes[1];
    testNodes[1] = dataNodes[3];
    testNodes[2] = dataNodes[0];
    cluster.setRandomSeed(0xDEADBEEF);
    cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
    assertTrue(testNodes[0] == dataNodes[0]);
    assertTrue(testNodes[1] == dataNodes[1]);
    assertTrue(testNodes[2] == dataNodes[3]);
    // No local node: rack-local first, then increasing distance.
    testNodes[0] = dataNodes[5];
    testNodes[1] = dataNodes[3];
    testNodes[2] = dataNodes[1];
    cluster.setRandomSeed(0xDEADBEEF);
    cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
    assertTrue(testNodes[0] == dataNodes[1]);
    assertTrue(testNodes[1] == dataNodes[3]);
    assertTrue(testNodes[2] == dataNodes[5]);
    // Same input, same seed: ordering is reproducible.
    testNodes[0] = dataNodes[1];
    testNodes[1] = dataNodes[5];
    testNodes[2] = dataNodes[3];
    cluster.setRandomSeed(0xDEADBEEF);
    cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
    assertTrue(testNodes[0] == dataNodes[1]);
    assertTrue(testNodes[1] == dataNodes[3]);
    assertTrue(testNodes[2] == dataNodes[5]);
    // Distinct distances: ordering is independent of the seed.
    testNodes[0] = dataNodes[1];
    testNodes[1] = dataNodes[5];
    testNodes[2] = dataNodes[3];
    cluster.setRandomSeed(0xDEAD);
    cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
    assertTrue(testNodes[0] == dataNodes[1]);
    assertTrue(testNodes[1] == dataNodes[3]);
    assertTrue(testNodes[2] == dataNodes[5]);
    // Without a fixed seed, equidistant candidates should yield a different
    // first location for at least one of the readers 5..7.
    DatanodeDescriptor first = null;
    boolean foundRandom = false;
    for (int i = 5; i <= 7; i++) {
        testNodes[0] = dataNodes[5];
        testNodes[1] = dataNodes[6];
        testNodes[2] = dataNodes[7];
        cluster.sortByDistance(dataNodes[i], testNodes, testNodes.length);
        if (first == null) {
            first = testNodes[0];
        } else {
            if (first != testNodes[0]) {
                foundRandom = true;
                break;
            }
        }
    }
    assertTrue("Expected to find a different first location", foundRandom);
    // Same check for rack-local candidates.
    first = null;
    // BUG FIX: reset the flag; previously it stayed true from the loop above
    // and the assertion below could never fail.
    foundRandom = false;
    for (int i = 1; i <= 4; i++) {
        testNodes[0] = dataNodes[13];
        testNodes[1] = dataNodes[14];
        testNodes[2] = dataNodes[15];
        cluster.sortByDistance(dataNodes[i], testNodes, testNodes.length);
        if (first == null) {
            first = testNodes[0];
        } else {
            if (first != testNodes[0]) {
                foundRandom = true;
                break;
            }
        }
    }
    assertTrue("Expected to find a different first location", foundRandom);
    // Reader that is not a datanode, but is on one of the racks.
    testNodes[0] = dataNodes[0];
    testNodes[1] = dataNodes[5];
    testNodes[2] = dataNodes[8];
    Node rackClient = new NodeBase("/d3/r1/25.25.25");
    cluster.setRandomSeed(0xDEADBEEF);
    cluster.sortByDistanceUsingNetworkLocation(rackClient, testNodes, testNodes.length);
    assertTrue(testNodes[0] == dataNodes[8]);
    assertTrue(testNodes[1] != testNodes[2]);
    assertTrue(testNodes[1] == dataNodes[0] || testNodes[1] == dataNodes[5]);
    assertTrue(testNodes[2] == dataNodes[0] || testNodes[2] == dataNodes[5]);
    // Reader that is not a datanode, matching only at the datacenter level.
    testNodes[0] = dataNodes[8];
    testNodes[1] = dataNodes[5];
    testNodes[2] = dataNodes[0];
    Node dcClient = new NodeBase("/d1/r2/25.25.25");
    cluster.setRandomSeed(0xDEADBEEF);
    cluster.sortByDistanceUsingNetworkLocation(dcClient, testNodes, testNodes.length);
    assertTrue(testNodes[0] == dataNodes[0]);
    assertTrue(testNodes[1] != testNodes[2]);
    assertTrue(testNodes[1] == dataNodes[5] || testNodes[1] == dataNodes[8]);
    assertTrue(testNodes[2] == dataNodes[5] || testNodes[2] == dataNodes[8]);
}
273529.8217125hadoop
/**
 * Converts an {@code EventBatchList} into the protobuf response for the
 * getEditsFromTxid RPC.  Each inotify {@code Event} is serialized into an
 * {@code EventProto} whose type tag identifies the payload and whose
 * contents field carries the type-specific proto as a ByteString.
 *
 * @param el the batches of inotify events plus the txid bookkeeping
 *           (first/last/sync txids) to propagate to the client
 * @return the fully built GetEditsFromTxidResponseProto
 * @throws RuntimeException if an event has an unrecognized type
 */
public static GetEditsFromTxidResponseProto convertEditsResponse(EventBatchList el) {
    InotifyProtos.EventsListProto.Builder builder = InotifyProtos.EventsListProto.newBuilder();
    for (EventBatch b : el.getBatches()) {
        List<InotifyProtos.EventProto> events = Lists.newArrayList();
        for (Event e : b.getEvents()) {
            switch(e.getEventType()) {
                case CLOSE:
                    // path + final file size + timestamp.
                    Event.CloseEvent ce = (Event.CloseEvent) e;
                    events.add(InotifyProtos.EventProto.newBuilder().setType(InotifyProtos.EventType.EVENT_CLOSE).setContents(InotifyProtos.CloseEventProto.newBuilder().setPath(ce.getPath()).setFileSize(ce.getFileSize()).setTimestamp(ce.getTimestamp()).build().toByteString()).build());
                    break;
                case CREATE:
                    // Note: a null symlink target is encoded as "" (proto
                    // string fields cannot be null); erasureCoded is set only
                    // when present on the event.
                    Event.CreateEvent ce2 = (Event.CreateEvent) e;
                    InotifyProtos.CreateEventProto.Builder pB = (InotifyProtos.CreateEventProto.newBuilder());
                    pB.setType(createTypeConvert(ce2.getiNodeType())).setPath(ce2.getPath()).setCtime(ce2.getCtime()).setOwnerName(ce2.getOwnerName()).setGroupName(ce2.getGroupName()).setPerms(convert(ce2.getPerms())).setReplication(ce2.getReplication()).setSymlinkTarget(ce2.getSymlinkTarget() == null ? "" : ce2.getSymlinkTarget()).setDefaultBlockSize(ce2.getDefaultBlockSize()).setOverwrite(ce2.getOverwrite());
                    if (ce2.isErasureCoded().isPresent()) {
                        pB.setErasureCoded(ce2.isErasureCoded().get());
                    }
                    events.add(InotifyProtos.EventProto.newBuilder().setType(InotifyProtos.EventType.EVENT_CREATE).setContents(pB.build().toByteString()).build());
                    break;
                case METADATA:
                    // Null owner/group become ""; null acl/xattr lists become
                    // empty lists; perms are set only when non-null.
                    Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
                    InotifyProtos.MetadataUpdateEventProto.Builder metaB = InotifyProtos.MetadataUpdateEventProto.newBuilder().setPath(me.getPath()).setType(metadataUpdateTypeConvert(me.getMetadataType())).setMtime(me.getMtime()).setAtime(me.getAtime()).setReplication(me.getReplication()).setOwnerName(me.getOwnerName() == null ? "" : me.getOwnerName()).setGroupName(me.getGroupName() == null ? "" : me.getGroupName()).addAllAcls(me.getAcls() == null ? Lists.<AclEntryProto>newArrayList() : convertAclEntryProto(me.getAcls())).addAllXAttrs(me.getxAttrs() == null ? Lists.<XAttrProto>newArrayList() : convertXAttrProto(me.getxAttrs())).setXAttrsRemoved(me.isxAttrsRemoved());
                    if (me.getPerms() != null) {
                        metaB.setPerms(convert(me.getPerms()));
                    }
                    events.add(InotifyProtos.EventProto.newBuilder().setType(InotifyProtos.EventType.EVENT_METADATA).setContents(metaB.build().toByteString()).build());
                    break;
                case RENAME:
                    Event.RenameEvent re = (Event.RenameEvent) e;
                    events.add(InotifyProtos.EventProto.newBuilder().setType(InotifyProtos.EventType.EVENT_RENAME).setContents(InotifyProtos.RenameEventProto.newBuilder().setSrcPath(re.getSrcPath()).setDestPath(re.getDstPath()).setTimestamp(re.getTimestamp()).build().toByteString()).build());
                    break;
                case APPEND:
                    Event.AppendEvent re2 = (Event.AppendEvent) e;
                    events.add(InotifyProtos.EventProto.newBuilder().setType(InotifyProtos.EventType.EVENT_APPEND).setContents(InotifyProtos.AppendEventProto.newBuilder().setPath(re2.getPath()).setNewBlock(re2.toNewBlock()).build().toByteString()).build());
                    break;
                case UNLINK:
                    Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
                    events.add(InotifyProtos.EventProto.newBuilder().setType(InotifyProtos.EventType.EVENT_UNLINK).setContents(InotifyProtos.UnlinkEventProto.newBuilder().setPath(ue.getPath()).setTimestamp(ue.getTimestamp()).build().toByteString()).build());
                    break;
                case TRUNCATE:
                    Event.TruncateEvent te = (Event.TruncateEvent) e;
                    events.add(InotifyProtos.EventProto.newBuilder().setType(InotifyProtos.EventType.EVENT_TRUNCATE).setContents(InotifyProtos.TruncateEventProto.newBuilder().setPath(te.getPath()).setFileSize(te.getFileSize()).setTimestamp(te.getTimestamp()).build().toByteString()).build());
                    break;
                default:
                    // New event types must be added to this switch explicitly.
                    throw new RuntimeException("Unexpected inotify event: " + e);
            }
        }
        // Each batch keeps its transaction id alongside its events.
        builder.addBatch(InotifyProtos.EventBatchProto.newBuilder().setTxid(b.getTxid()).addAllEvents(events));
    }
    builder.setFirstTxid(el.getFirstTxid());
    builder.setLastTxid(el.getLastTxid());
    builder.setSyncTxid(el.getSyncTxid());
    return GetEditsFromTxidResponseProto.newBuilder().setEventsList(builder.build()).build();
}
274838.185132hadoop
/**
 * Renders the MapReduce job overview page: a summary info block, the
 * ApplicationMaster attempt table, the map/reduce task progress table, and
 * the task-attempt state table (with links to filtered task/attempt views).
 *
 * Emits an apology paragraph and returns early when the JOB_ID request
 * parameter is missing or the job is unknown to the AppContext.
 *
 * @param html the Hamlet block to render into
 */
protected void render(Block html) {
    String jid = $(JOB_ID);
    if (jid.isEmpty()) {
        html.p().__("Sorry, can't do anything without a JobID.").__();
        return;
    }
    JobId jobID = MRApps.toJobID(jid);
    Job job = appContext.getJob(jobID);
    if (job == null) {
        html.p().__("Sorry, ", jid, " not found.").__();
        return;
    }
    // NOTE(review): assumes getAMInfos() never returns null here — verify,
    // since size() would NPE otherwise.
    List<AMInfo> amInfos = job.getAMInfos();
    String amString = amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";
    JobInfo jinfo = new JobInfo(job, true);
    info("Job Overview").__("Job Name:", jinfo.getName()).__("User Name:", jinfo.getUserName()).__("Queue Name:", jinfo.getQueueName()).__("State:", jinfo.getState()).__("Uberized:", jinfo.isUberized()).__("Started:", new Date(jinfo.getStartTime())).__("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
    // One table row per AM attempt, linking to its node and its logs.
    DIV<Hamlet> div = html.__(InfoBlock.class).div(_INFO_WRAP);
    TABLE<DIV<Hamlet>> table = div.table("#job");
    table.tr().th(amString).__().tr().th(_TH, "Attempt Number").th(_TH, "Start Time").th(_TH, "Node").th(_TH, "Logs").__();
    for (AMInfo amInfo : amInfos) {
        AMAttemptInfo attempt = new AMAttemptInfo(amInfo, jinfo.getId(), jinfo.getUserName());
        table.tr().td(String.valueOf(attempt.getAttemptId())).td(new Date(attempt.getStartTime()).toString()).td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()), attempt.getNodeHttpAddress()).__().td().a(".logslink", url(attempt.getLogsLink()), "logs").__().__();
    }
    table.__();
    div.__();
    // Task progress table (Map/Reduce rows with progress bars and counts
    // linked to filtered task lists), followed by the attempt-state table
    // (New/Running/Failed/Killed/Successful, linked to filtered attempt
    // lists).  Built as one Hamlet chain.
    html.div(_INFO_WRAP).table("#job").tr().th(_TH, "Task Type").th(_TH, "Progress").th(_TH, "Total").th(_TH, "Pending").th(_TH, "Running").th(_TH, "Complete").__().tr(_ODD).th("Map").td().div(_PROGRESSBAR).$title(join(jinfo.getMapProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", jinfo.getMapProgressPercent(), '%')).__().__().__().td().a(url("tasks", jid, "m", "ALL"), String.valueOf(jinfo.getMapsTotal())).__().td().a(url("tasks", jid, "m", "PENDING"), String.valueOf(jinfo.getMapsPending())).__().td().a(url("tasks", jid, "m", "RUNNING"), String.valueOf(jinfo.getMapsRunning())).__().td().a(url("tasks", jid, "m", "COMPLETED"), String.valueOf(jinfo.getMapsCompleted())).__().__().tr(_EVEN).th("Reduce").td().div(_PROGRESSBAR).$title(join(jinfo.getReduceProgressPercent(), '%')).div(_PROGRESSBAR_VALUE).$style(join("width:", jinfo.getReduceProgressPercent(), '%')).__().__().__().td().a(url("tasks", jid, "r", "ALL"), String.valueOf(jinfo.getReducesTotal())).__().td().a(url("tasks", jid, "r", "PENDING"), String.valueOf(jinfo.getReducesPending())).__().td().a(url("tasks", jid, "r", "RUNNING"), String.valueOf(jinfo.getReducesRunning())).__().td().a(url("tasks", jid, "r", "COMPLETED"), String.valueOf(jinfo.getReducesCompleted())).__().__().__().table("#job").tr().th(_TH, "Attempt Type").th(_TH, "New").th(_TH, "Running").th(_TH, "Failed").th(_TH, "Killed").th(_TH, "Successful").__().tr(_ODD).th("Maps").td().a(url("attempts", jid, "m", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewMapAttempts())).__().td().a(url("attempts", jid, "m", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningMapAttempts())).__().td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedMapAttempts())).__().td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledMapAttempts())).__().td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), 
String.valueOf(jinfo.getSuccessfulMapAttempts())).__().__().tr(_EVEN).th("Reduces").td().a(url("attempts", jid, "r", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewReduceAttempts())).__().td().a(url("attempts", jid, "r", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningReduceAttempts())).__().td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedReduceAttempts())).__().td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledReduceAttempts())).__().td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(jinfo.getSuccessfulReduceAttempts())).__().__().__().__();
}
273244.6820122hadoop
/**
 * Runs a SleepJob (2 maps, 2 reduces) with task profiling enabled and
 * verifies that profiler output is produced only for the single task id
 * configured via setProfileTaskRange ({@code PROFILED_TASK_ID}).
 *
 * BUG FIX: the three BufferedReaders were previously closed only on the
 * success path (no finally), leaking the underlying streams when
 * readLine() threw; they now use try-with-resources.
 *
 * @param useDefault when true, the default profiler parameters are used;
 *        otherwise explicit map/reduce profile params are configured
 *        (JFR on Java 9+, hprof / -Xprof on Java 8)
 * @throws Exception if the job fails or log inspection errors out
 */
private void testProfilerInternal(boolean useDefault) throws Exception {
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }
    final SleepJob sleepJob = new SleepJob();
    final JobConf sleepConf = new JobConf(mrCluster.getConfig());
    sleepConf.setProfileEnabled(true);
    // Profile exactly one map task and one reduce task.
    sleepConf.setProfileTaskRange(true, String.valueOf(PROFILED_TASK_ID));
    sleepConf.setProfileTaskRange(false, String.valueOf(PROFILED_TASK_ID));
    if (!useDefault) {
        if (Shell.isJavaVersionAtLeast(9)) {
            // hprof was removed in Java 9; use Java Flight Recorder instead.
            sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS, "-XX:StartFlightRecording=dumponexit=true,filename=%s");
            sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-XX:StartFlightRecording=dumponexit=true,filename=%s");
        } else {
            sleepConf.set(MRJobConfig.TASK_MAP_PROFILE_PARAMS, "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n," + "file=%s");
            sleepConf.set(MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, "-Xprof");
        }
    }
    sleepJob.setConf(sleepConf);
    final Job job = sleepJob.createJob(2, 2, 500, 1, 500, 1);
    job.setJarByClass(SleepJob.class);
    job.addFileToClassPath(APP_JAR);
    job.waitForCompletion(true);
    final JobId jobId = TypeConverter.toYarn(job.getJobID());
    final ApplicationId appID = jobId.getAppId();
    // Poll (up to 60s, once per second) for the app to reach a terminal
    // state in the ResourceManager.
    int pollElapsed = 0;
    while (true) {
        Thread.sleep(1000);
        pollElapsed += 1000;
        if (TERMINAL_RM_APP_STATES.contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
            break;
        }
        if (pollElapsed >= 60000) {
            LOG.warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }
    Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
    // Map each task attempt to its container log directory by scanning the
    // NodeManager syslog files for "Task:attempt_..." lines.
    final Configuration nmConf = mrCluster.getNodeManager(0).getConfig();
    final String appIdStr = appID.toString();
    final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
    final String containerGlob = "container_" + appIdSuffix + "_*_*";
    final Map<TaskAttemptID, Path> taLogDirs = new HashMap<TaskAttemptID, Path>();
    final Pattern taskPattern = Pattern.compile(".*Task:(attempt_" + appIdSuffix + "_[rm]_" + "[0-9]+_[0-9]+).*");
    for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
        for (FileStatus fileStatus : localFs.globStatus(new Path(logDir + Path.SEPARATOR + appIdStr + Path.SEPARATOR + containerGlob + Path.SEPARATOR + TaskLog.LogName.SYSLOG))) {
            try (BufferedReader br = new BufferedReader(new InputStreamReader(localFs.open(fileStatus.getPath())))) {
                String line;
                while ((line = br.readLine()) != null) {
                    final Matcher m = taskPattern.matcher(line);
                    if (m.matches()) {
                        taLogDirs.put(TaskAttemptID.forName(m.group(1)), fileStatus.getPath().getParent());
                        break;
                    }
                }
            }
        }
    }
    // 2 maps + 2 reduces.
    Assert.assertEquals(4, taLogDirs.size());
    if (Shell.isJavaVersionAtLeast(9)) {
        // The remaining assertions check hprof / -Xprof output, which do not
        // exist on Java 9+.
        return;
    }
    for (Map.Entry<TaskAttemptID, Path> dirEntry : taLogDirs.entrySet()) {
        final TaskAttemptID tid = dirEntry.getKey();
        final Path profilePath = new Path(dirEntry.getValue(), TaskLog.LogName.PROFILE.toString());
        final Path stdoutPath = new Path(dirEntry.getValue(), TaskLog.LogName.STDOUT.toString());
        if (useDefault || tid.getTaskType() == TaskType.MAP) {
            if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
                // hprof writes its header to the profile log; stdout must
                // stay empty.
                try (BufferedReader br = new BufferedReader(new InputStreamReader(localFs.open(profilePath)))) {
                    final String line = br.readLine();
                    Assert.assertTrue("No hprof content found!", line != null && line.startsWith("JAVA PROFILE"));
                }
                Assert.assertEquals(0L, localFs.getFileStatus(stdoutPath).getLen());
            } else {
                Assert.assertFalse("hprof file should not exist", localFs.exists(profilePath));
            }
        } else {
            // Reduce tasks configured with -Xprof: the flat profile goes to
            // stdout, and no hprof file should exist.
            Assert.assertFalse("hprof file should not exist", localFs.exists(profilePath));
            if (tid.getTaskID().getId() == PROFILED_TASK_ID) {
                try (BufferedReader br = new BufferedReader(new InputStreamReader(localFs.open(stdoutPath)))) {
                    boolean flatProfFound = false;
                    String line;
                    while ((line = br.readLine()) != null) {
                        if (line.startsWith("Flat profile")) {
                            flatProfFound = true;
                            break;
                        }
                    }
                    Assert.assertTrue("Xprof flat profile not found!", flatProfFound);
                }
            } else {
                Assert.assertEquals(0L, localFs.getFileStatus(stdoutPath).getLen());
            }
        }
    }
}
272636.0525114hadoop
/**
 * Issues an HTTP request against one of the given service URLs, retrying
 * across URLs and attempts (via {@code shouldRetry}) until a well-formed
 * JSON response is received, and returns the response body as a string.
 * A localhost URL, when present, is tried first; subsequent attempts pick
 * URLs (pseudo-)randomly.  Responses are validated for status 200, a JSON
 * Content-Type, and a Content-Length within {@code MAX_CONTENT_LENGTH}.
 *
 * BUG FIX: the response body was read with a BufferedReader declared null
 * and closed in a bare finally block; if opening the entity stream threw,
 * {@code rd.close()} raised a NullPointerException that masked the real
 * IOException.  The reader now uses try-with-resources.
 *
 * @param urls candidate remote service base URLs
 * @param path request path appended to the chosen URL
 * @param queryParams query parameters for the request
 * @param httpMethod HTTP method name (e.g. GET)
 * @return the response body
 * @throws WasbRemoteCallException on malformed responses, URI build
 *         failures, or when retries are exhausted
 */
protected String retryableRequest(String[] urls, String path, List<NameValuePair> queryParams, String httpMethod) throws IOException {
    HttpResponse response = null;
    HttpUriRequest httpRequest = null;
    // Prefer a localhost endpoint, if any, for the first attempt.
    int indexOfLocalUrl = -1;
    for (int i = 0; i < urls.length; i++) {
        if (urls[i].toLowerCase().startsWith("https://localhost:") || urls[i].toLowerCase().startsWith("http://localhost:")) {
            indexOfLocalUrl = i;
        }
    }
    boolean requiresNewAuth = false;
    for (int retry = 0, index = (indexOfLocalUrl != -1) ? indexOfLocalUrl : random.nextInt(urls.length); ; retry++, index++) {
        if (index >= urls.length) {
            index = index % urls.length;
        }
        // After the localhost attempt fails, jump to a random URL, skipping
        // the localhost one.
        if (indexOfLocalUrl != -1 && retry == 1) {
            index = (index + random.nextInt(urls.length)) % urls.length;
            if (index == indexOfLocalUrl) {
                index = (index + 1) % urls.length;
            }
        }
        try {
            httpRequest = getHttpRequest(urls, path, queryParams, index, httpMethod, requiresNewAuth);
            httpRequest.setHeader("Accept", APPLICATION_JSON);
            response = client.execute(httpRequest);
            StatusLine statusLine = response.getStatusLine();
            if (statusLine == null || statusLine.getStatusCode() != HttpStatus.SC_OK) {
                // A 401 forces re-authentication on the next attempt.
                requiresNewAuth = (statusLine == null) || (statusLine.getStatusCode() == HttpStatus.SC_UNAUTHORIZED);
                throw new WasbRemoteCallException(httpRequest.getURI().toString() + ":" + ((statusLine != null) ? statusLine.toString() : "NULL"));
            } else {
                requiresNewAuth = false;
            }
            Header contentTypeHeader = response.getFirstHeader("Content-Type");
            if (contentTypeHeader == null || !APPLICATION_JSON.equals(contentTypeHeader.getValue())) {
                throw new WasbRemoteCallException(httpRequest.getURI().toString() + ":" + "Content-Type mismatch: expected: " + APPLICATION_JSON + ", got " + ((contentTypeHeader != null) ? contentTypeHeader.getValue() : "NULL"));
            }
            Header contentLengthHeader = response.getFirstHeader("Content-Length");
            if (contentLengthHeader == null) {
                throw new WasbRemoteCallException(httpRequest.getURI().toString() + ":" + "Content-Length header missing");
            }
            try {
                if (Integer.parseInt(contentLengthHeader.getValue()) > MAX_CONTENT_LENGTH) {
                    throw new WasbRemoteCallException(httpRequest.getURI().toString() + ":" + "Content-Length:" + contentLengthHeader.getValue() + "exceeded max:" + MAX_CONTENT_LENGTH);
                }
            } catch (NumberFormatException nfe) {
                throw new WasbRemoteCallException(httpRequest.getURI().toString() + ":" + "Invalid Content-Length value :" + contentLengthHeader.getValue());
            }
            // Read the entire response body; try-with-resources guarantees
            // the reader (and entity stream) is closed even on failure.
            StringBuilder responseBody = new StringBuilder();
            try (BufferedReader rd = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) {
                String responseLine;
                while ((responseLine = rd.readLine()) != null) {
                    responseBody.append(responseLine);
                }
            }
            return responseBody.toString();
        } catch (URISyntaxException uriSyntaxEx) {
            throw new WasbRemoteCallException("Encountered URISyntaxException " + "while building the HttpGetRequest to remote service", uriSyntaxEx);
        } catch (IOException e) {
            LOG.debug(e.getMessage(), e);
            try {
                // shouldRetry either returns (loop continues) or throws when
                // the retry budget is exhausted.
                shouldRetry(e, retry, (httpRequest != null) ? httpRequest.getURI().toString() : urls[index]);
            } catch (IOException ioex) {
                String message = "Encountered error while making remote call to " + String.join(",", urls) + " retried " + retry + " time(s).";
                LOG.error(message, ioex);
                throw new WasbRemoteCallException(message, ioex);
            }
        }
    }
}
274082.811150hadoop
private void addApplicationOptions(Options opts) {
    opts.addOption(STATUS_CMD, true, "Prints the status of the application. " + "If app ID is provided, it prints the generic YARN application " + "status. If name is provided, it prints the application specific " + "status based on app's own implementation, and -appTypes option " + "must be specified unless it is the default yarn-service type.");
    opts.addOption(LIST_CMD, false, "List applications. Supports optional use " + "of -appTypes to filter applications based on application type, " + "-appStates to filter applications based on application state and " + "-appTags to filter applications based on application tag.");
    opts.addOption(MOVE_TO_QUEUE_CMD, true, "Moves the application to a " + "different queue. Deprecated command. Use 'changeQueue' instead.");
    opts.addOption(QUEUE_CMD, true, "Works with the movetoqueue command to" + " specify which queue to move an application to.");
    opts.addOption(HELP_CMD, false, "Displays help for all commands.");
    Option appTypeOpt = new Option(APP_TYPE_CMD, true, "Works with -list to " + "filter applications based on input comma-separated list of " + "application types.");
    appTypeOpt.setValueSeparator(',');
    appTypeOpt.setArgs(Option.UNLIMITED_VALUES);
    appTypeOpt.setArgName("Types");
    opts.addOption(appTypeOpt);
    Option appStateOpt = new Option(APP_STATE_CMD, true, "Works with -list to " + "filter applications based on input comma-separated list of " + "application states. " + getAllValidApplicationStates());
    appStateOpt.setValueSeparator(',');
    appStateOpt.setArgs(Option.UNLIMITED_VALUES);
    appStateOpt.setArgName("States");
    opts.addOption(appStateOpt);
    Option appTagOpt = new Option(APP_TAG_CMD, true, "Works with -list to " + "filter applications based on input comma-separated list of " + "application tags.");
    appTagOpt.setValueSeparator(',');
    appTagOpt.setArgs(Option.UNLIMITED_VALUES);
    appTagOpt.setArgName("Tags");
    opts.addOption(appTagOpt);
    opts.addOption(APP_ID, true, "Specify Application Id to be operated");
    opts.addOption(UPDATE_PRIORITY, true, "update priority of an application. " + "ApplicationId can be passed using 'appId' option.");
    opts.addOption(UPDATE_LIFETIME, true, "update timeout of an application " + "from NOW. ApplicationId can be passed using 'appId' option. Timeout " + "value is in seconds.");
    opts.addOption(CHANGE_APPLICATION_QUEUE, true, "Moves application to a new " + "queue. ApplicationId can be  passed using 'appId' option. " + "'movetoqueue' command is  deprecated, this new command " + "'changeQueue' performs same functionality.");
    Option killOpt = new Option(KILL_CMD, true, "Kills the application. Set of " + "applications can be provided separated with space");
    killOpt.setValueSeparator(' ');
    killOpt.setArgs(Option.UNLIMITED_VALUES);
    killOpt.setArgName("Application ID");
    opts.addOption(killOpt);
    opts.getOption(MOVE_TO_QUEUE_CMD).setArgName("Application ID");
    opts.getOption(QUEUE_CMD).setArgName("Queue Name");
    opts.getOption(STATUS_CMD).setArgName("Application Name or ID");
    opts.getOption(APP_ID).setArgName("Application ID");
    opts.getOption(UPDATE_PRIORITY).setArgName("Priority");
    opts.getOption(UPDATE_LIFETIME).setArgName("Timeout");
    opts.getOption(CHANGE_APPLICATION_QUEUE).setArgName("Queue Name");
    opts.addOption(LAUNCH_CMD, true, "Launches application from specification " + "file (saves specification and starts application). Options " + "-updateLifetime and -changeQueue can be specified to alter the " + "values provided in the file. Supports -appTypes option to specify " + "which client implementation to use.");
    opts.addOption(STOP_CMD, true, "Stops application gracefully (may be " + "started again later). If name is provided, appType must be " + "provided unless it is the default yarn-service. If ID is provided, " + "the appType will be looked up. Supports -appTypes option to specify " + "which client implementation to use.");
    opts.addOption(START_CMD, true, "Starts a previously saved application. " + "Supports -appTypes option to specify which client implementation " + "to use.");
    opts.addOption(SAVE_CMD, true, "Saves specification file for an " + "application. Options -updateLifetime and -changeQueue can be " + "specified to alter the values provided in the file. Supports " + "-appTypes option to specify which client implementation to use.");
    opts.addOption(DESTROY_CMD, true, "Destroys a saved application " + "specification and removes all application data permanently. " + "Supports -appTypes option to specify which client implementation " + "to use.");
    opts.addOption(FLEX_CMD, true, "Changes number of running containers for a " + "component of an application / long-running service. Requires " + "-component option. If name is provided, appType must be provided " + "unless it is the default yarn-service. If ID is provided, the " + "appType will be looked up. Supports -appTypes option to specify " + "which client implementation to use.");
    opts.addOption(DECOMMISSION, true, "Decommissions component instances for " + "an application / long-running service. Requires -instances option. " + "Supports -appTypes option to specify which client implementation to " + "use.");
    opts.addOption(COMPONENT, true, "Works with -flex option to change the " + "number of components/containers running for an application / " + "long-running service. Supports absolute or relative changes, such " + "as +1, 2, or -3.");
    opts.addOption(ENABLE_FAST_LAUNCH, true, "Uploads AM dependencies to HDFS " + "to make future launches faster. Supports -appTypes option to " + "specify which client implementation to use. Optionally a " + "destination folder for the tarball can be specified.");
    opts.addOption(UPGRADE_CMD, true, "Upgrades an application/long-running " + "service. It requires either -initiate, -instances, or -finalize " + "options.");
    opts.addOption(UPGRADE_EXPRESS, true, "Works with -upgrade option to " + "perform express upgrade.  It requires the upgraded application " + "specification file.");
    opts.addOption(UPGRADE_INITIATE, true, "Works with -upgrade option to " + "initiate the application upgrade. It requires the upgraded " + "application specification file.");
    opts.addOption(COMPONENT_INSTS, true, "Works with -upgrade option to " + "trigger the upgrade of specified component instances of the " + "application. Also works with -decommission option to decommission " + "specified component instances. Multiple instances should be " + "separated by commas.");
    opts.addOption(COMPONENTS, true, "Works with -upgrade option to trigger " + "the upgrade of specified components of the application. Multiple " + "components should be separated by commas.");
    opts.addOption(UPGRADE_FINALIZE, false, "Works with -upgrade option to " + "finalize the upgrade.");
    opts.addOption(UPGRADE_AUTO_FINALIZE, false, "Works with -upgrade and " + "-initiate options to initiate the upgrade of the application with " + "the ability to finalize the upgrade automatically.");
    opts.addOption(UPGRADE_CANCEL, false, "Works with -upgrade option to " + "cancel current upgrade.");
    opts.addOption(CLUSTER_ID_OPTION, true, "ClusterId. By default, it will " + "take default cluster id from the RM");
    opts.getOption(LAUNCH_CMD).setArgName("Application Name> <File Name");
    opts.getOption(LAUNCH_CMD).setArgs(2);
    opts.getOption(START_CMD).setArgName("Application Name");
    opts.getOption(STOP_CMD).setArgName("Application Name or ID");
    opts.getOption(SAVE_CMD).setArgName("Application Name> <File Name");
    opts.getOption(SAVE_CMD).setArgs(2);
    opts.getOption(DESTROY_CMD).setArgName("Application Name");
    opts.getOption(FLEX_CMD).setArgName("Application Name or ID");
    opts.getOption(COMPONENT).setArgName("Component Name> <Count");
    opts.getOption(COMPONENT).setArgs(2);
    opts.getOption(ENABLE_FAST_LAUNCH).setOptionalArg(true);
    opts.getOption(ENABLE_FAST_LAUNCH).setArgName("Destination Folder");
    opts.getOption(UPGRADE_CMD).setArgName("Application Name");
    opts.getOption(UPGRADE_CMD).setArgs(1);
    opts.getOption(UPGRADE_INITIATE).setArgName("File Name");
    opts.getOption(UPGRADE_INITIATE).setArgs(1);
    opts.getOption(COMPONENT_INSTS).setArgName("Component Instances");
    opts.getOption(COMPONENT_INSTS).setValueSeparator(',');
    opts.getOption(COMPONENT_INSTS).setArgs(Option.UNLIMITED_VALUES);
    opts.getOption(COMPONENTS).setArgName("Components");
    opts.getOption(COMPONENTS).setValueSeparator(',');
    opts.getOption(COMPONENTS).setArgs(Option.UNLIMITED_VALUES);
    opts.getOption(DECOMMISSION).setArgName("Application Name");
    opts.getOption(DECOMMISSION).setArgs(1);
    opts.getOption(CLUSTER_ID_OPTION).setArgName("Cluster ID");
}
272112.5737102hadoop
/**
 * Builds {@code ApplicationReport}s for every RMApp that matches all of the
 * filters carried by the request (scope, queue, application type, state, user,
 * start/finish time range, tags and name), collecting at most
 * {@code request.getLimit()} entries.
 * <p>
 * Access control: an application is skipped when the scope is VIEWABLE (or
 * {@code filterAppsByUser} is enabled) and the caller lacks VIEW_APP access;
 * the same ACL result also controls how much detail the report carries.
 *
 * @param request filter criteria; individual filters may be null or empty,
 *                which disables that particular filter
 * @return response whose application list holds the matching reports
 * @throws YarnException if the caller UGI cannot be resolved
 */
public GetApplicationsResponse getApplications(GetApplicationsRequest request) throws YarnException {
    UserGroupInformation callerUGI = getCallerUgi(null, AuditConstants.GET_APPLICATIONS_REQUEST);
    Set<String> applicationTypes = getLowerCasedAppTypes(request);
    EnumSet<YarnApplicationState> applicationStates = request.getApplicationStates();
    Set<String> users = request.getUsers();
    Set<String> queues = request.getQueues();
    Set<String> tags = request.getApplicationTags();
    long limit = request.getLimit();
    Range<Long> start = request.getStartRange();
    Range<Long> finish = request.getFinishRange();
    ApplicationsRequestScope scope = request.getScope();
    String name = request.getName();
    final Map<ApplicationId, RMApp> apps = rmContext.getRMApps();
    // NOTE(review): assumed to tolerate a null queues set, as in the original
    // call site — confirm against the helper's implementation.
    final Set<ApplicationId> runningAppsFilteredByQueues = getRunningAppsFilteredByQueues(apps, queues);
    // Resolve each requested queue name to its full path so apps recorded under
    // the full path still match; fall back to the raw name for unknown queues.
    Set<String> queuePaths = new HashSet<>();
    // Fix: guard against a request with no queue filter at all — iterating a
    // null set would throw NullPointerException.
    if (queues != null) {
        for (String queue : queues) {
            String queuePath = rmAppManager.getQueuePath(queue);
            queuePaths.add(queuePath != null ? queuePath : queue);
        }
    }
    Iterator<RMApp> appsIter = apps.values().iterator();
    List<ApplicationReport> reports = new ArrayList<ApplicationReport>();
    while (appsIter.hasNext() && reports.size() < limit) {
        RMApp application = appsIter.next();
        // Scope OWN: only the caller's own applications are visible.
        if (scope == ApplicationsRequestScope.OWN && !callerUGI.getUserName().equals(application.getUser())) {
            continue;
        }
        // queuePaths is never null here; the former null check was dead code.
        if (!queuePaths.isEmpty()) {
            if (!runningAppsFilteredByQueues.contains(application.getApplicationId()) && !queuePaths.contains(application.getQueue())) {
                continue;
            }
        }
        if (applicationTypes != null && !applicationTypes.isEmpty()) {
            // Requested types were lower-cased up front; normalize the app's
            // type the same way before comparing.
            String appTypeToMatch = StringUtils.toLowerCase(application.getApplicationType());
            if (!applicationTypes.contains(appTypeToMatch)) {
                continue;
            }
        }
        if (applicationStates != null && !applicationStates.isEmpty()) {
            if (!applicationStates.contains(application.createApplicationState())) {
                continue;
            }
        }
        if (users != null && !users.isEmpty() && !users.contains(application.getUser())) {
            continue;
        }
        if (start != null && !start.contains(application.getStartTime())) {
            continue;
        }
        if (finish != null && !finish.contains(application.getFinishTime())) {
            continue;
        }
        // Tag filter: the app matches when it carries at least one requested tag.
        if (tags != null && !tags.isEmpty()) {
            Set<String> appTags = application.getApplicationTags();
            if (appTags == null || appTags.isEmpty()) {
                continue;
            }
            boolean match = false;
            for (String tag : tags) {
                if (appTags.contains(tag)) {
                    match = true;
                    break;
                }
            }
            if (!match) {
                continue;
            }
        }
        // Evaluate the ACL once and reuse it both for scope filtering and for
        // deciding the level of detail in the generated report.
        boolean allowAccess = checkAccess(callerUGI, application.getUser(), ApplicationAccessType.VIEW_APP, application);
        if (scope == ApplicationsRequestScope.VIEWABLE && !allowAccess) {
            continue;
        }
        if (filterAppsByUser && !allowAccess) {
            continue;
        }
        if (name != null && !name.equals(application.getName())) {
            continue;
        }
        reports.add(application.createAndGetApplicationReport(callerUGI.getUserName(), allowAccess));
    }
    RMAuditLogger.logSuccess(callerUGI.getUserName(), AuditConstants.GET_APPLICATIONS_REQUEST, "ClientRMService");
    GetApplicationsResponse response = recordFactory.newRecordInstance(GetApplicationsResponse.class);
    response.setApplicationList(reports);
    return response;
}
272490.7922132hadoop
/**
 * Handles one heartbeat from a NodeManager: validates the node, detects
 * duplicate / out-of-sync heartbeats via response ids, drives decommissioning,
 * builds the next response (keys, tokens, collectors, resource updates,
 * labels, attributes, queuing limits) and dispatches the node status event.
 * Response-id bookkeeping and the event dispatch order are protocol-critical.
 *
 * @param request heartbeat payload from the NodeManager
 * @return the response the NM must act on (NORMAL, RESYNC or SHUTDOWN)
 * @throws YarnException on RM-side failures
 * @throws IOException on I/O failures
 */
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnException, IOException {
    NodeStatus remoteNodeStatus = request.getNodeStatus();
    NodeId nodeId = remoteNodeStatus.getNodeId();
    // Reject nodes not in the allowed list — unless they are mid-decommission,
    // which must keep heartbeating until drained.
    if (!this.nodesListManager.isValidNode(nodeId.getHost()) && !isNodeInDecommissioning(nodeId)) {
        String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: " + nodeId.getHost();
        LOG.info(message);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.SHUTDOWN, message);
    }
    // Unknown node (e.g. RM restarted): ask the NM to re-register.
    RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
    if (rmNode == null) {
        String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
        LOG.info(message);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
    }
    // Node is alive; refresh liveliness and decommissioning bookkeeping.
    this.nmLivelinessMonitor.receivedPing(nodeId);
    this.decommissioningWatcher.update(rmNode, remoteNodeStatus);
    NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
    // Duplicate heartbeat (NM resent before receiving our last reply):
    // replay the previous response instead of processing it again.
    if (getNextResponseId(remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse.getResponseId()) {
        LOG.info("Received duplicate heartbeat from node " + rmNode.getNodeAddress() + " responseId=" + remoteNodeStatus.getResponseId());
        return lastNodeHeartbeatResponse;
    } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse.getResponseId()) {
        // Response ids diverged by more than one step: NM is out of sync —
        // mark it rebooting and force a resync.
        String message = "Too far behind rm response id:" + lastNodeHeartbeatResponse.getResponseId() + " nm response id:" + remoteNodeStatus.getResponseId();
        LOG.info(message);
        this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
    }
    // A DECOMMISSIONING node with no remaining workload is finalized here.
    if (rmNode.getState() == NodeState.DECOMMISSIONING && decommissioningWatcher.checkReadyToBeDecommissioned(rmNode.getNodeID())) {
        String message = "DECOMMISSIONING " + nodeId + " is ready to be decommissioned";
        LOG.info(message);
        this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
        this.nmLivelinessMonitor.unregister(nodeId);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.SHUTDOWN, message);
    }
    if (timelineServiceV2Enabled) {
        updateAppCollectorsMap(request);
    }
    // Optionally scale the next heartbeat interval based on node behavior.
    long newInterval = nextHeartBeatInterval;
    if (heartBeatIntervalScalingEnable) {
        newInterval = rmNode.calculateHeartBeatInterval(nextHeartBeatInterval, heartBeatIntervalMin, heartBeatIntervalMax, heartBeatIntervalSpeedupFactor, heartBeatIntervalSlowdownFactor);
    }
    // Normal path: build the next response with an incremented response id
    // and record it on the node before dispatching the status event.
    NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils.newNodeHeartbeatResponse(getNextResponseId(lastNodeHeartbeatResponse.getResponseId()), NodeAction.NORMAL, null, null, null, null, newInterval);
    rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);
    populateKeys(request, nodeHeartBeatResponse);
    populateTokenSequenceNo(request, nodeHeartBeatResponse);
    if (timelineServiceV2Enabled) {
        setAppCollectorsMapToResponse(rmNode.getRunningApps(), nodeHeartBeatResponse);
    }
    // Forward the node's status (and any log-aggregation reports) to the
    // scheduler/dispatcher pipeline.
    RMNodeStatusEvent nodeStatusEvent = new RMNodeStatusEvent(nodeId, remoteNodeStatus);
    if (request.getLogAggregationReportsForApps() != null && !request.getLogAggregationReportsForApps().isEmpty()) {
        nodeStatusEvent.setLogAggregationReportsForApps(request.getLogAggregationReportsForApps());
    }
    this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);
    // Distributed node labels: accept or reject the NM-reported labels and
    // tell the NM which outcome occurred.
    if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
        try {
            updateNodeLabelsFromNMReport(NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId);
            nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
        } catch (IOException ex) {
            nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
            nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
        }
    }
    // Resource overrides: dynamic-resource configuration first, then any
    // capability update recorded on the RMNode (the latter wins if both set).
    String nid = nodeId.toString();
    Resource capability = loadNodeResourceFromDRConfiguration(nid);
    if (capability != null) {
        nodeHeartBeatResponse.setResource(capability);
    }
    if (rmNode.isUpdatedCapability()) {
        nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
        rmNode.resetUpdatedCapability();
    }
    if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
        nodeHeartBeatResponse.setContainerQueuingLimit(this.rmContext.getNodeManagerQueueLimitCalculator().createContainerQueuingLimit());
    }
    // Node attributes: on failure, append to any diagnostics already set
    // (e.g. by the label update above) rather than overwriting them.
    if (request.getNodeAttributes() != null) {
        try {
            updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
            nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
        } catch (IOException ex) {
            String errorMsg = nodeHeartBeatResponse.getDiagnosticsMessage() == null ? ex.getMessage() : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex.getMessage();
            nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
            nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
        }
    }
    return nodeHeartBeatResponse;
}
273424.4914121hadoop
/**
 * Verifies that when an AM fails and the app keeps its containers
 * (keepContainers=true), the second attempt inherits RUNNING containers from
 * the first attempt while ACQUIRED/ALLOCATED/RESERVED containers are killed,
 * and that their completion events are reported to the new attempt.
 * Timing-sensitive: relies on heartbeat-driven state transitions and sleeps.
 */
public void testAMRestartWithExistingContainers() throws Exception {
    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
    MockRM rm1 = new MockRM(getConf());
    rm1.start();
    // Submit with keepContainers=true so containers outlive the AM attempt.
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(200, rm1).withAppName("name").withUser("user").withAcls(new HashMap<ApplicationAccessType, String>()).withUnmanagedAM(false).withQueue("default").withMaxAppAttempts(-1).withCredentials(null).withAppType("MAPREDUCE").withWaitForAppAcceptedState(false).withKeepContainers(true).build();
    RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
    MockNM nm1 = new MockNM("127.0.0.1:1234", 10240, rm1.getResourceTrackerService());
    nm1.registerNode();
    MockNM nm2 = new MockNM("127.0.0.1:2351", 4089, rm1.getResourceTrackerService());
    nm2.registerNode();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    int NUM_CONTAINERS = 3;
    allocateContainers(nm1, am1, NUM_CONTAINERS);
    // Containers 2 and 3 are launched (RUNNING); container 4 stays ACQUIRED.
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
    ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING);
    ContainerId containerId3 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
    rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING);
    ContainerId containerId4 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 4);
    rm1.waitForState(nm1, containerId4, RMContainerState.ACQUIRED);
    // Container 5: allocated by the scheduler but never acquired by the AM.
    am1.allocate("127.0.0.1", 1024, 1, new ArrayList<ContainerId>());
    nm1.nodeHeartbeat(true);
    ContainerId containerId5 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 5);
    rm1.waitForState(nm1, containerId5, RMContainerState.ALLOCATED);
    // Container 6: request too large to fit, so it becomes a reservation.
    am1.allocate("127.0.0.1", 6000, 1, new ArrayList<ContainerId>());
    ContainerId containerId6 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 6);
    nm1.nodeHeartbeat(true);
    SchedulerApplicationAttempt schedulerAttempt = ((AbstractYarnScheduler) rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId6);
    // Busy-wait (heartbeating) until the reservation shows up.
    while (schedulerAttempt.getReservedContainers().isEmpty()) {
        System.out.println("Waiting for container " + containerId6 + " to be reserved.");
        nm1.nodeHeartbeat(true);
        Thread.sleep(200);
    }
    Assert.assertEquals(containerId6, schedulerAttempt.getReservedContainers().get(0).getContainerId());
    // Kill the AM container (id 1) to fail the first attempt.
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
    rm1.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
    // NOTE(review): fixed sleep gives the RM time to process the failure —
    // potential flakiness on slow machines.
    Thread.sleep(3000);
    // Launched containers survive the attempt failure...
    rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
    // ...but acquired (4) and allocated-but-unacquired (5) are killed.
    Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId4));
    Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId5));
    rm1.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
    // A new attempt must have been created.
    ApplicationAttemptId newAttemptId = app1.getCurrentAppAttempt().getAppAttemptId();
    Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId()));
    MockAM am2 = rm1.launchAM(app1, rm1, nm1);
    RegisterApplicationMasterResponse registerResponse = am2.registerAppAttempt();
    // The new AM is told about the two still-running containers (2 and 3).
    Assert.assertEquals(2, registerResponse.getContainersFromPreviousAttempts().size());
    boolean containerId2Exists = false, containerId3Exists = false;
    for (Container container : registerResponse.getContainersFromPreviousAttempts()) {
        if (container.getId().equals(containerId2)) {
            containerId2Exists = true;
        }
        if (container.getId().equals(containerId3)) {
            containerId3Exists = true;
        }
    }
    Assert.assertTrue(containerId2Exists && containerId3Exists);
    rm1.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
    // Complete container 3 under the new attempt; finished-container reports
    // for 3, 4, 5 and 6 should all be routed to the new attempt.
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.COMPLETE);
    RMAppAttempt newAttempt = app1.getRMAppAttempt(am2.getApplicationAttemptId());
    waitForContainersToFinish(4, newAttempt);
    boolean container3Exists = false, container4Exists = false, container5Exists = false, container6Exists = false;
    for (ContainerStatus status : newAttempt.getJustFinishedContainers()) {
        if (status.getContainerId().equals(containerId3)) {
            container3Exists = true;
        }
        if (status.getContainerId().equals(containerId4)) {
            container4Exists = true;
        }
        if (status.getContainerId().equals(containerId5)) {
            container5Exists = true;
        }
        if (status.getContainerId().equals(containerId6)) {
            container6Exists = true;
        }
    }
    Assert.assertTrue(container3Exists && container4Exists && container5Exists && container6Exists);
    rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
    SchedulerApplicationAttempt schedulerNewAttempt = ((AbstractYarnScheduler) rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId2);
    // Finishing the app releases the inherited container 2 as well.
    MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am2);
    Assert.assertFalse(schedulerNewAttempt.getLiveContainers().contains(containerId2));
    System.out.println("New attempt's just finished containers: " + newAttempt.getJustFinishedContainers());
    waitForContainersToFinish(5, newAttempt);
    rm1.stop();
}
274062.41142hadoop
/**
 * Verifies that RMContainer lifecycle transitions keep the
 * AllocationTagsManager's per-node tag cardinality in sync: tags are added on
 * START (or RECOVER) and removed when the container finishes, across several
 * event sequences (KILL before acquire, FINISHED after ACQUIRED, FINISHED
 * after LAUNCHED, and recovery). Event order is what is being tested.
 */
public void testContainerTransitionNotifyAllocationTagsManager() throws Exception {
    // Real dispatcher so events are actually delivered to the container FSM.
    DrainDispatcher drainDispatcher = new DrainDispatcher();
    EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(EventHandler.class);
    EventHandler generic = mock(EventHandler.class);
    drainDispatcher.register(RMAppAttemptEventType.class, appAttemptEventHandler);
    drainDispatcher.register(RMNodeEventType.class, generic);
    drainDispatcher.init(new YarnConfiguration());
    drainDispatcher.start();
    NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
    ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
    ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
    Resource resource = Resources.createResource(512);
    Priority priority = BuilderUtils.newPriority(5);
    Container container = BuilderUtils.newContainer(containerId, nodeId, "host:3465", resource, priority, null);
    // The "mapper" allocation tag is what the tags manager should track.
    container.setAllocationTags(ImmutableSet.of("mapper"));
    ConcurrentMap<ApplicationId, RMApp> rmApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    when(rmApp.getRMAppAttempt(any())).thenReturn(null);
    Mockito.doReturn(rmApp).when(rmApps).get(ArgumentMatchers.<ApplicationId>any());
    RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
    SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
    // Mocked RMContext wired with a real AllocationTagsManager under test.
    RMContext rmContext = mock(RMContext.class);
    AllocationTagsManager tagsManager = new AllocationTagsManager(rmContext);
    when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
    when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
    when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
    when(rmContext.getRMApps()).thenReturn(rmApps);
    when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
    when(rmContext.getAllocationTagsManager()).thenReturn(tagsManager);
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO, true);
    when(rmContext.getYarnConfiguration()).thenReturn(conf);
    RMNode rmNode = new RMNodeImpl(nodeId, rmContext, "localhost", 0, 0, null, Resource.newInstance(10240, 10), null);
    SchedulerNode schedulerNode = new FiCaSchedulerNode(rmNode, false);
    RMContainerImpl rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, nodeId, "user", rmContext);
    // Baseline: no tags registered before the container starts (different
    // appId on purpose — cardinality must be 0 either way).
    Assert.assertEquals(0, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(TestUtils.getMockApplicationId(1), null), Long::max));
    // Sequence 1: START then KILL (completed before acquire).
    rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.START));
    schedulerNode.allocateContainer(rmContainer);
    Assert.assertEquals(1, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus.newInstance(containerId, ContainerState.COMPLETE, "", 0), RMContainerEventType.KILL));
    schedulerNode.releaseContainer(container.getId(), true);
    Assert.assertEquals(0, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    // Sequence 2: START -> ACQUIRED -> FINISHED.
    rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, nodeId, "user", rmContext);
    Assert.assertEquals(0, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    rmContainer.setAllocationTags(ImmutableSet.of("mapper"));
    rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.START));
    schedulerNode.allocateContainer(rmContainer);
    Assert.assertEquals(1, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus.newInstance(containerId, ContainerState.COMPLETE, "", 0), RMContainerEventType.FINISHED));
    schedulerNode.releaseContainer(container.getId(), true);
    Assert.assertEquals(0, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    // Sequence 3: START -> ACQUIRED -> LAUNCHED -> FINISHED.
    rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, nodeId, "user", rmContext);
    rmContainer.setAllocationTags(ImmutableSet.of("mapper"));
    Assert.assertEquals(0, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.START));
    schedulerNode.allocateContainer(rmContainer);
    Assert.assertEquals(1, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
    rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
    rmContainer.handle(new RMContainerFinishedEvent(containerId, ContainerStatus.newInstance(containerId, ContainerState.COMPLETE, "", 0), RMContainerEventType.FINISHED));
    schedulerNode.releaseContainer(container.getId(), true);
    Assert.assertEquals(0, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    // Sequence 4: RECOVER must also register the tags carried by the
    // recovered container status.
    rmContainer = new RMContainerImpl(container, SchedulerRequestKey.extractFrom(container), appAttemptId, nodeId, "user", rmContext);
    rmContainer.setAllocationTags(ImmutableSet.of("mapper"));
    Assert.assertEquals(0, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
    NMContainerStatus containerStatus = NMContainerStatus.newInstance(containerId, 0, ContainerState.NEW, Resource.newInstance(1024, 1), "recover container", 0, Priority.newInstance(0), 0);
    containerStatus.setAllocationTags(ImmutableSet.of("mapper"));
    rmContainer.handle(new RMContainerRecoverEvent(containerId, containerStatus));
    Assert.assertEquals(1, tagsManager.getNodeCardinalityByOp(nodeId, AllocationTags.createSingleAppAllocationTags(appId, null), Long::max));
}
273982.931138hadoop
/**
 * Verifies that with absolute min/max resource configuration, effective
 * min/max resources stay fixed while absolute capacities (ratios of cluster
 * resource) shrink when a third node joins the cluster. The assertion groups
 * are order-dependent: the second group only holds after the extra
 * registerNode call increases the cluster resource.
 */
public void testEffectiveResourceAfterIncreasingClusterResource() throws Exception {
    CapacitySchedulerConfiguration csConf = setupComplexQueueConfiguration(false);
    setupComplexMinMaxResourceConfig(csConf);
    csConf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    @SuppressWarnings("resource")
    MockRM rm = new MockRM(csConf);
    rm.start();
    // Initial cluster: 2 nodes x 125 GB / 20 vcores = 250 GB total.
    rm.registerNode("127.0.0.1:1234", 125 * GB, 20);
    rm.registerNode("127.0.0.2:1234", 125 * GB, 20);
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    // QUEUEA: configured == effective quotas; capacities relative to 250 GB.
    ParentQueue qA = (ParentQueue) cs.getQueue(QUEUEA);
    Assert.assertNotNull(qA);
    Assert.assertEquals("Min resource configured for QUEUEA is not correct", QUEUE_A_MINRES, qA.usageTracker.getQueueResourceQuotas().getConfiguredMinResource());
    Assert.assertEquals("Max resource configured for QUEUEA is not correct", QUEUE_A_MAXRES, qA.usageTracker.getQueueResourceQuotas().getConfiguredMaxResource());
    Assert.assertEquals("Effective Min resource for QUEUEA is not correct", QUEUE_A_MINRES, qA.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEA is not correct", QUEUE_A_MAXRES, qA.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEA is not correct", 0.4, qA.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEA is not correct", 0.8, qA.getAbsoluteMaximumCapacity(), DELTA);
    // QUEUEB: same checks with its own quotas.
    ParentQueue qB = (ParentQueue) cs.getQueue(QUEUEB);
    Assert.assertNotNull(qB);
    Assert.assertEquals("Min resource configured for QUEUEB is not correct", QUEUE_B_MINRES, qB.usageTracker.getQueueResourceQuotas().getConfiguredMinResource());
    Assert.assertEquals("Max resource configured for QUEUEB is not correct", QUEUE_B_MAXRES, qB.usageTracker.getQueueResourceQuotas().getConfiguredMaxResource());
    Assert.assertEquals("Effective Min resource for QUEUEB is not correct", QUEUE_B_MINRES, qB.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEB is not correct", QUEUE_B_MAXRES, qB.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEB is not correct", 0.2, qB.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEB is not correct", 0.6, qB.getAbsoluteMaximumCapacity(), DELTA);
    // QUEUEC: leaf queue at the top level.
    LeafQueue qC = (LeafQueue) cs.getQueue(QUEUEC);
    Assert.assertNotNull(qC);
    Assert.assertEquals("Min resource configured for QUEUEC is not correct", QUEUE_C_MINRES, qC.usageTracker.getQueueResourceQuotas().getConfiguredMinResource());
    Assert.assertEquals("Max resource configured for QUEUEC is not correct", QUEUE_C_MAXRES, qC.usageTracker.getQueueResourceQuotas().getConfiguredMaxResource());
    Assert.assertEquals("Effective Min resource for QUEUEC is not correct", QUEUE_C_MINRES, qC.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEC is not correct", QUEUE_C_MAXRES, qC.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEC is not correct", 0.1, qC.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEC is not correct", 0.6, qC.getAbsoluteMaximumCapacity(), DELTA);
    // Child queues inherit their parent's effective max resource.
    LeafQueue qA1 = (LeafQueue) cs.getQueue(QUEUEA1);
    Assert.assertEquals("Effective Min resource for QUEUEA1 is not correct", QUEUE_A1_MINRES, qA1.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEA1 is not correct", QUEUE_A_MAXRES, qA1.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEA1 is not correct", 0.2, qA1.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEA1 is not correct", 0.8, qA1.getAbsoluteMaximumCapacity(), DELTA);
    LeafQueue qA2 = (LeafQueue) cs.getQueue(QUEUEA2);
    Assert.assertEquals("Effective Min resource for QUEUEA2 is not correct", QUEUE_A2_MINRES, qA2.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEA2 is not correct", QUEUE_A_MAXRES, qA2.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEA2 is not correct", 0.2, qA2.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEA2 is not correct", 0.8, qA2.getAbsoluteMaximumCapacity(), DELTA);
    LeafQueue qB1 = (LeafQueue) cs.getQueue(QUEUEB1);
    Assert.assertEquals("Min resource configured for QUEUEB1 is not correct", QUEUE_B1_MINRES, qB1.usageTracker.getQueueResourceQuotas().getConfiguredMinResource());
    Assert.assertEquals("Max resource configured for QUEUEB1 is not correct", QUEUE_B_MAXRES, qB1.usageTracker.getQueueResourceQuotas().getConfiguredMaxResource());
    Assert.assertEquals("Effective Min resource for QUEUEB1 is not correct", QUEUE_B1_MINRES, qB1.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEB1 is not correct", QUEUE_B_MAXRES, qB1.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEB1 is not correct", 0.16, qB1.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEB1 is not correct", 0.6, qB1.getAbsoluteMaximumCapacity(), DELTA);
    // Grow the cluster to 375 GB: effective resources must be unchanged,
    // absolute capacities must scale down by 2/3 (e.g. 0.4 -> 0.266).
    rm.registerNode("127.0.0.3:1234", 125 * GB, 20);
    Assert.assertEquals("Effective Min resource for QUEUEA is not correct", QUEUE_A_MINRES, qA.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEA is not correct", QUEUE_A_MAXRES, qA.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEA is not correct", 0.266, qA.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEA is not correct", 0.533, qA.getAbsoluteMaximumCapacity(), DELTA);
    Assert.assertEquals("Effective Min resource for QUEUEB is not correct", QUEUE_B_MINRES, qB.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEB is not correct", QUEUE_B_MAXRES, qB.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEB is not correct", 0.133, qB.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEB is not correct", 0.4, qB.getAbsoluteMaximumCapacity(), DELTA);
    Assert.assertEquals("Effective Min resource for QUEUEC is not correct", QUEUE_C_MINRES, qC.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEC is not correct", QUEUE_C_MAXRES, qC.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEC is not correct", 0.066, qC.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEC is not correct", 0.4, qC.getAbsoluteMaximumCapacity(), DELTA);
    Assert.assertEquals("Effective Min resource for QUEUEB1 is not correct", QUEUE_B1_MINRES, qB1.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEB1 is not correct", QUEUE_B_MAXRES, qB1.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEB1 is not correct", 0.106, qB1.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEB1 is not correct", 0.4, qB1.getAbsoluteMaximumCapacity(), DELTA);
    Assert.assertEquals("Effective Min resource for QUEUEA1 is not correct", QUEUE_A1_MINRES, qA1.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEA1 is not correct", QUEUE_A_MAXRES, qA1.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEA1 is not correct", 0.133, qA1.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEA1 is not correct", 0.533, qA1.getAbsoluteMaximumCapacity(), DELTA);
    Assert.assertEquals("Effective Min resource for QUEUEA2 is not correct", QUEUE_A2_MINRES, qA2.usageTracker.getQueueResourceQuotas().getEffectiveMinResource());
    Assert.assertEquals("Effective Max resource for QUEUEA2 is not correct", QUEUE_A_MAXRES, qA2.usageTracker.getQueueResourceQuotas().getEffectiveMaxResource());
    Assert.assertEquals("Absolute capacity for QUEUEA2 is not correct", 0.133, qA2.getAbsoluteCapacity(), DELTA);
    Assert.assertEquals("Absolute Max capacity for QUEUEA2 is not correct", 0.533, qA2.getAbsoluteMaximumCapacity(), DELTA);
    rm.stop();
}
274895.681132hadoop
/**
 * Verifies per-user headroom reported to applications on leaf queue "b2"
 * when user limits are set to 25% and requests target both the default
 * partition and node label "y".
 *
 * The expected-headroom arithmetic below mirrors the stubbed configuration
 * (three 160GB nodes, complex queue-label config from TestUtils); the
 * literal factors (0.5, 0.9, 0.25) come from that config — see
 * TestUtils.getComplexConfigurationWithQueueLabels.
 */
public void testHeadroom() throws Exception {
    // --- Configuration: label mapping plus 25% user limits on A1 and B2.
    simpleNodeLabelMappingToManager();
    CapacitySchedulerConfiguration csConf = (CapacitySchedulerConfiguration) TestUtils.getComplexConfigurationWithQueueLabels(conf);
    csConf.setUserLimit(A1, 25);
    csConf.setUserLimit(B2, 25);
    // NOTE(review): this local 'conf' shadows the field 'conf' read three
    // lines above; the field is still what seeded csConf. Confirm the shadow
    // is intentional.
    YarnConfiguration conf = new YarnConfiguration();
    // --- Mock a CapacitySchedulerContext wired to the label manager.
    CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
    when(csContext.getConfiguration()).thenReturn(csConf);
    when(csContext.getConf()).thenReturn(conf);
    when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
    when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB));
    when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
    RMContext rmContext = TestUtils.getMockRMContext();
    RMContext spyRMContext = spy(rmContext);
    when(spyRMContext.getNodeLabelManager()).thenReturn(mgr);
    when(csContext.getRMContext()).thenReturn(spyRMContext);
    when(csContext.getPreemptionManager()).thenReturn(new PreemptionManager());
    CapacitySchedulerQueueManager queueManager = new CapacitySchedulerQueueManager(csConf, mgr, null);
    when(csContext.getCapacitySchedulerQueueManager()).thenReturn(queueManager);
    queueManager.reinitConfiguredNodeLabels(csConf);
    // --- Three 160GB/16-core nodes; cluster resource mocked to 160GB total.
    mgr.activateNode(NodeId.newInstance("h0", 0), Resource.newInstance(160 * GB, 16));
    mgr.activateNode(NodeId.newInstance("h1", 0), Resource.newInstance(160 * GB, 16));
    mgr.activateNode(NodeId.newInstance("h2", 0), Resource.newInstance(160 * GB, 16));
    Resource clusterResource = Resources.createResource(160 * GB);
    when(csContext.getClusterResource()).thenReturn(clusterResource);
    // --- Build the queue hierarchy and grab a stubbed leaf queue "b2".
    CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(csContext);
    CSQueueStore queues = new CSQueueStore();
    CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(queueContext, csConf, null, "root", queues, queues, TestUtils.spyHook);
    queueManager.setRootQueue(rootQueue);
    rootQueue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get("b2"));
    queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("h0", rack_0, 0, 160 * GB);
    FiCaSchedulerNode node_1 = TestUtils.getMockNode("h1", rack_0, 0, 160 * GB);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    // --- RMApp plumbing: every app lookup resolves to the same mocked app
    //     with a zero-sized AM request so AM accounting does not interfere.
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    ResourceRequest amResourceRequest = mock(ResourceRequest.class);
    Resource amResource = Resources.createResource(0, 0);
    when(amResourceRequest.getCapability()).thenReturn(amResource);
    when(rmApp.getAMResourceRequests()).thenReturn(Collections.singletonList(amResourceRequest));
    Mockito.doReturn(rmApp).when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
    when(rmApp.getRMAppAttempt(any())).thenReturn(rmAppAttempt);
    when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
    Mockito.doReturn(rmApp).when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
    Mockito.doReturn(true).when(spyApps).containsKey(ArgumentMatchers.<ApplicationId>any());
    Priority priority_1 = TestUtils.createMockPriority(1);
    // --- App 0_0 (user_0): default-partition request only; headroom is the
    //     user-limit share of the default partition.
    final ApplicationAttemptId appAttemptId_0_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_0, user_0);
    List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
    app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_0.updateResourceRequests(app_0_0_requests);
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    Resource expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // --- App 0_1 (same user): requests in both the default partition and
    //     label "y"; its headroom additionally includes the "y" share.
    final ApplicationAttemptId appAttemptId_0_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_1, user_0);
    List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_1.updateResourceRequests(app_0_1_requests);
    app_0_1_requests.clear();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_0_1.updateResourceRequests(app_0_1_requests);
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    queue.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // App 0_0 keeps its default-partition-only headroom.
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    Resource expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    // --- App 1_0 (user_1): second active user, also requesting in both
    //     partitions; headrooms must be recomputed yet stay the same here.
    final ApplicationAttemptId appAttemptId_1_0 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_1_0, user_1);
    List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_1_0.updateResourceRequests(app_1_0_requests);
    app_1_0_requests.clear();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
    app_1_0.updateResourceRequests(app_1_0_requests);
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
    expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
    assertEquals(expectedHeadroomWithReqInY, app_1_0.getHeadroom());
}
27466.119247hadoop
/**
 * End-to-end check of QueueMetrics (per-queue, per-partition, and per-user)
 * when one node carries label "x" and the other the default partition.
 *
 * Setup: root has queues a (25%) and b (75%) in the default partition and
 * a 50/50 split of label "x". Two 10GB NMs register; h1 is labelled "x".
 * The test then walks through: idle metrics, a 1GB AM + 3x1GB containers,
 * 5 more pending containers, full allocation after repeated heartbeats,
 * and finally app kill — asserting available/allocated/pending MB at every
 * aggregation level each time.
 */
public void testQueueMetricsWithLabelsOnDefaultLabelNode() throws Exception {
    // --- Capacity config: a=25/b=75 on default partition, 50/50 on "x",
    //     max capacity on "x" capped at 50 for both; user metrics enabled.
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(this.conf);
    csConf.setQueues(ROOT, new String[] { "a", "b" });
    csConf.setCapacityByLabel(ROOT, "x", 100);
    csConf.setCapacity(A, 25);
    csConf.setAccessibleNodeLabels(A, toSet("x"));
    csConf.setCapacityByLabel(A, "x", 50);
    csConf.setMaximumCapacityByLabel(A, "x", 50);
    csConf.setCapacity(B, 75);
    csConf.setAccessibleNodeLabels(B, toSet("x"));
    csConf.setCapacityByLabel(B, "x", 50);
    csConf.setMaximumCapacityByLabel(B, "x", 50);
    csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
    // Label "x" is non-exclusive=false; node h1 carries it.
    mgr.addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x", false)));
    mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
    MockRM rm1 = new MockRM(csConf) {

        @Override
        public RMNodeLabelsManager createNodeLabelManager() {
            return mgr;
        }
    };
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    // h1 (label "x") and h2 (default partition), 10GB each.
    MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB);
    MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    SchedulerNode schedulerNode1 = cs.getSchedulerNode(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    SchedulerNode schedulerNode2 = cs.getSchedulerNode(nm2.getNodeId());
    // Drive enough node updates for the scheduler to settle.
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    double delta = 0.0001;
    // --- Idle-cluster expectations: default partition splits 2.5/7.5 GB
    //     between a and b; partition "x" splits 5/5 GB.
    CSQueue leafQueue = cs.getQueue("a");
    CSQueue leafQueueB = cs.getQueue("b");
    CSQueue rootQueue = cs.getRootQueue();
    assertEquals(10 * GB, rootQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(2.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(7.5 * GB, leafQueueB.getMetrics().getAvailableMB(), delta);
    // Look up every metrics source we assert on below.
    MetricsSystem ms = leafQueueB.getMetrics().getMetricsSystem();
    QueueMetrics partXMetrics = (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "x");
    QueueMetrics partDefaultMetrics = (QueueMetrics) TestPartitionQueueMetrics.partitionSource(ms, "");
    QueueMetrics queueAMetrics = (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.a");
    QueueMetrics queueBMetrics = (QueueMetrics) TestQueueMetrics.queueSource(ms, "root.b");
    QueueMetrics queueAPartDefaultMetrics = (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.a");
    QueueMetrics queueAPartXMetrics = (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.a");
    QueueMetrics queueBPartDefaultMetrics = (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "", "root.b");
    QueueMetrics queueBPartXMetrics = (QueueMetrics) TestPartitionQueueMetrics.queueSource(ms, "x", "root.b");
    QueueMetrics rootMetrics = (QueueMetrics) TestQueueMetrics.queueSource(ms, "root");
    assertEquals(10 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(10 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(7.5 * GB, queueBPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, queueBPartXMetrics.getAvailableMB(), delta);
    assertEquals(10 * GB, rootMetrics.getAvailableMB(), delta);
    assertEquals(2.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(7.5 * GB, queueBMetrics.getAvailableMB(), delta);
    // --- Submit a 1GB app to queue "a"; AM lands on nm2 (default
    //     partition), then ask for 3x1GB anywhere.
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1).withAppName("app").withUser("user").withAcls(null).withQueue("a").withUnmanagedAM(false).build();
    RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
    am1.allocate("*", 1 * GB, 3, new ArrayList<ContainerId>());
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    // The three requested containers land on the labelled node (h1); the
    // AM container (1GB) stays on h2.
    Assert.assertEquals(3, schedulerNode1.getNumContainers());
    SchedulerNodeReport reportNm1 = rm1.getResourceScheduler().getNodeReport(nm1.getNodeId());
    Assert.assertEquals(3 * GB, reportNm1.getUsedResource().getMemorySize());
    Assert.assertEquals(7 * GB, reportNm1.getAvailableResource().getMemorySize());
    SchedulerNodeReport reportNm2 = rm1.getResourceScheduler().getNodeReport(nm2.getNodeId());
    Assert.assertEquals(1 * GB, reportNm2.getUsedResource().getMemorySize());
    Assert.assertEquals(9 * GB, reportNm2.getAvailableResource().getMemorySize());
    // Metrics after the first allocation round, at every level.
    assertEquals(7 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(9 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(1.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(7.5 * GB, queueBPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(5 * GB, queueBPartXMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAMetrics.getAllocatedMB(), delta);
    assertEquals(1.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(0 * GB, queueBMetrics.getAllocatedMB(), delta);
    assertEquals(7.5 * GB, queueBMetrics.getAvailableMB(), delta);
    assertEquals(0 * GB, queueAMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueBPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueBPartXMetrics.getPendingMB(), delta);
    assertEquals(1.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(1 * GB, leafQueue.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, partXMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, partDefaultMetrics.getAllocatedMB(), delta);
    // Per-user metrics for "user" on root.a, per partition and overall.
    QueueMetrics partDefaultQueueAUserMetrics = (QueueMetrics) TestPartitionQueueMetrics.userSource(ms, "", "user", "root.a");
    QueueMetrics partXQueueAUserMetrics = (QueueMetrics) TestPartitionQueueMetrics.userSource(ms, "x", "user", "root.a");
    QueueMetrics queueAUserMetrics = (QueueMetrics) TestQueueMetrics.userSource(ms, "root.a", "user");
    assertEquals(2 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(1.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta);
    // --- Ask for 5 more 1GB containers; a single nm2 heartbeat allocates
    //     one, leaving 4GB pending on the default partition.
    am1.allocate("*", 1 * GB, 5, new ArrayList<ContainerId>());
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    Assert.assertEquals(2, schedulerNode2.getNumContainers());
    Assert.assertEquals(3, schedulerNode1.getNumContainers());
    assertEquals(5 * GB / 10, leafQueue.getMetrics().getAvailableMB());
    assertEquals(2 * GB, leafQueue.getMetrics().getAllocatedMB());
    // Invariant: available + allocated covers the whole default partition.
    assertEquals(10 * GB, rootQueue.getMetrics().getAvailableMB() + rootQueue.getMetrics().getAllocatedMB());
    assertEquals(0.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAMetrics.getAllocatedMB());
    assertEquals(0.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(2 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(7 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partXMetrics.getAllocatedMB(), delta);
    assertEquals(8 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, partDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(4 * GB, queueAMetrics.getPendingMB(), delta);
    assertEquals(4 * GB, queueAPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(4 * GB, partDefaultQueueAUserMetrics.getPendingMB(), delta);
    assertEquals(4 * GB, queueAUserMetrics.getPendingMB(), delta);
    assertEquals(4 * GB, partDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXQueueAUserMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXMetrics.getPendingMB(), delta);
    // --- Heartbeat both nodes until the remaining 4 containers are placed
    //     (they fit on the labelled node); pending drains to zero.
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    assertEquals(0.5 * GB, queueAMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAMetrics.getAllocatedMB());
    assertEquals(0.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAPartDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(7 * GB, queueAPartXMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, partDefaultQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(0 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(7 * GB, partXQueueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(1 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, queueAUserMetrics.getAllocatedMB(), delta);
    assertEquals(3 * GB, partXMetrics.getAvailableMB(), delta);
    assertEquals(7 * GB, partXMetrics.getAllocatedMB(), delta);
    assertEquals(8 * GB, partDefaultMetrics.getAvailableMB(), delta);
    assertEquals(2 * GB, partDefaultMetrics.getAllocatedMB(), delta);
    assertEquals(0 * GB, queueAMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partDefaultQueueAUserMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAUserMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partDefaultMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, queueAPartXMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXQueueAUserMetrics.getPendingMB(), delta);
    assertEquals(0 * GB, partXMetrics.getPendingMB(), delta);
    // --- Kill the app; all resources return and release counters match
    //     the aggregate allocation counters.
    rm1.killApp(app1.getApplicationId());
    rm1.waitForState(app1.getApplicationId(), RMAppState.KILLED);
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    }
    for (int i = 0; i < 50; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    assertEquals(10 * GB, rootQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(2.5 * GB, leafQueue.getMetrics().getAvailableMB(), delta);
    assertEquals(7.5 * GB, leafQueueB.getMetrics().getAvailableMB(), delta);
    assertEquals(2, queueAMetrics.getAggregateAllocatedContainers());
    assertEquals(2, queueAMetrics.getAggegatedReleasedContainers());
    assertEquals(2, queueAPartDefaultMetrics.getAggregateAllocatedContainers());
    assertEquals(2, queueAPartDefaultMetrics.getAggegatedReleasedContainers());
    assertEquals(7, partXMetrics.getAggregateAllocatedContainers());
    assertEquals(2, partDefaultMetrics.getAggregateAllocatedContainers());
    assertEquals(7, queueAPartXMetrics.getAggregateAllocatedContainers());
    assertEquals(7, queueAPartXMetrics.getAggegatedReleasedContainers());
    assertEquals(2.5 * GB, queueAPartDefaultMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, queueAPartXMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, queueAUserMetrics.getAvailableMB(), delta);
    assertEquals(3 * GB, partDefaultQueueAUserMetrics.getAvailableMB(), delta);
    assertEquals(5 * GB, partXQueueAUserMetrics.getAvailableMB(), delta);
    rm1.close();
}
274357.741141hadoop
/**
 * Verifies scheduling order across a multi-level queue hierarchy
 * (root -> a,b,c,d with children a1,a2 and b1,b2,b3).
 *
 * Each round stubs per-queue allocations, drives root.assignContainers,
 * and uses Mockito InOrder to assert which child queue the parent offered
 * the node to first. The statement order is load-bearing: stubbing,
 * assignment, InOrder verification, and applyAllocationToQueue must
 * interleave exactly as written.
 */
public void testMultiLevelQueues() throws Exception {
    // --- Build the hierarchy from the shared csConf / queueContext fields.
    setupMultiLevelQueues(csConf);
    CSQueueStore queues = new CSQueueStore();
    CSQueue root = CapacitySchedulerQueueManager.parseQueue(queueContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
    // Cluster: 3 nodes x 10GB / 16 cores.
    final int memoryPerNode = 10;
    final int coresPerNode = 16;
    final int numNodes = 3;
    FiCaSchedulerNode node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode * GB);
    FiCaSchedulerNode node_1 = TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode * GB);
    FiCaSchedulerNode node_2 = TestUtils.getMockNode("host_2", DEFAULT_RACK, 0, memoryPerNode * GB);
    final Resource clusterResource = Resources.createResource(numNodes * (memoryPerNode * GB), numNodes * coresPerNode);
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    // Mark every queue (and root) as having 1GB pending so the parent
    // considers all of them schedulable.
    CSQueue a = queues.get(A);
    a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue b = queues.get(B);
    b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue c = queues.get(C);
    c.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue d = queues.get(D);
    d.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue a1 = queues.get(A1);
    a1.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue a2 = queues.get(A2);
    a2.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue b1 = queues.get(B1);
    b1.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue b2 = queues.get(B2);
    b2.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    CSQueue b3 = queues.get(B3);
    b3.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
    // --- Round 1: only c allocates (1GB) on node_0.
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    verifyQueueMetrics(a, 0 * GB, clusterResource);
    verifyQueueMetrics(b, 0 * GB, clusterResource);
    verifyQueueMetrics(c, 1 * GB, clusterResource);
    verifyQueueMetrics(d, 0 * GB, clusterResource);
    reset(a);
    reset(b);
    reset(c);
    // --- Round 2: leaf b2 allocates 4GB on node_1; metrics roll up to b.
    stubQueueAllocation(a, clusterResource, node_1, 0 * GB);
    stubQueueAllocation(b2, clusterResource, node_1, 4 * GB);
    stubQueueAllocation(c, clusterResource, node_1, 0 * GB);
    root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    applyAllocationToQueue(clusterResource, 4 * GB, b);
    verifyQueueMetrics(a, 0 * GB, clusterResource);
    verifyQueueMetrics(b, 4 * GB, clusterResource);
    verifyQueueMetrics(c, 1 * GB, clusterResource);
    reset(a);
    reset(b);
    reset(c);
    // --- Round 3: a1/b3/c all want node_0; utilization ordering means a
    //     (least used) goes first, then c, then b.
    stubQueueAllocation(a1, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b3, clusterResource, node_0, 2 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 2 * GB);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    InOrder allocationOrder = inOrder(a, c, b);
    allocationOrder.verify(a).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), anyResourceLimits(), any(SchedulingMode.class));
    applyAllocationToQueue(clusterResource, 1 * GB, a);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder.verify(c).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), anyResourceLimits(), any(SchedulingMode.class));
    applyAllocationToQueue(clusterResource, 2 * GB, root);
    root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), anyResourceLimits(), any(SchedulingMode.class));
    applyAllocationToQueue(clusterResource, 2 * GB, b);
    verifyQueueMetrics(a, 1 * GB, clusterResource);
    verifyQueueMetrics(b, 6 * GB, clusterResource);
    verifyQueueMetrics(c, 3 * GB, clusterResource);
    reset(a);
    reset(b);
    reset(c);
    LOG.info("here");
    // --- Round 4: cap parent a's max capacity at 10% and verify the order
    //     a -> a2 -> (b) -> (c) across repeated assignment passes.
    ((ParentQueue) a).setMaxCapacity(.1f);
    stubQueueAllocation(a1, clusterResource, node_2, 1 * GB);
    stubQueueAllocation(a2, clusterResource, node_2, 2 * GB);
    stubQueueAllocation(b3, clusterResource, node_2, 1 * GB);
    stubQueueAllocation(b1, clusterResource, node_2, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_2, 1 * GB);
    root.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder = inOrder(a, a2, a1, b, c);
    allocationOrder.verify(a).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), anyResourceLimits(), any(SchedulingMode.class));
    allocationOrder.verify(a2).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), anyResourceLimits(), any(SchedulingMode.class));
    applyAllocationToQueue(clusterResource, 2 * GB, a);
    root.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder.verify(b).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), anyResourceLimits(), any(SchedulingMode.class));
    applyAllocationToQueue(clusterResource, 2 * GB, b);
    root.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    allocationOrder.verify(c).assignContainers(eq(clusterResource), any(CandidateNodeSet.class), anyResourceLimits(), any(SchedulingMode.class));
    verifyQueueMetrics(a, 3 * GB, clusterResource);
    verifyQueueMetrics(b, 8 * GB, clusterResource);
    verifyQueueMetrics(c, 4 * GB, clusterResource);
    reset(a);
    reset(b);
    reset(c);
}
272682.871165hadoop
/**
 * Exercises basic AllocationTagsManager behavior: after adding containers
 * with tag sets on two hosts, node/rack cardinality queries (min/max/sum
 * aggregators) must reflect the live tag counts; after removing the same
 * containers every cardinality must drop back to zero.
 *
 * Cleanup over the original: repeated {@code NodeId.fromString} /
 * {@code TestUtils.getMockApplicationId} calls are hoisted into locals,
 * and two exact duplicate assertions (the repeated null-tag max check and
 * the repeated empty-tag sum check after removal) are dropped.
 *
 * @throws InvalidAllocationTagsQueryException if a cardinality query is
 *         malformed (not expected here)
 */
public void testAllocationTagsManagerSimpleCases() throws InvalidAllocationTagsQueryException {
    AllocationTagsManager atm = new AllocationTagsManager(rmContext);
    // Deterministic fixture identifiers, hoisted for readability.
    NodeId host1 = NodeId.fromString("host1:123");
    NodeId host2 = NodeId.fromString("host2:123");
    ApplicationId application1 = TestUtils.getMockApplicationId(1);
    ApplicationId application2 = TestUtils.getMockApplicationId(2);
    // Five containers across two hosts and two applications.
    atm.addContainer(host1, TestUtils.getMockContainerId(1, 1), ImmutableSet.of("mapper", "reducer"));
    atm.addContainer(host2, TestUtils.getMockContainerId(1, 2), ImmutableSet.of("mapper", "reducer"));
    atm.addContainer(host1, TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
    atm.addContainer(host2, TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
    atm.addContainer(host2, TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
    // Cardinality while containers are live: host2 has 1 "mapper" and
    // 2 "reducer" tags for app1, so min=1, max=2, sum=3.
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host1, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper")), Long::max));
    Assert.assertEquals(2, atm.getRackCardinality("rack0", application1, "mapper"));
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper", "reducer")), Long::min));
    Assert.assertEquals(2, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper", "reducer")), Long::max));
    Assert.assertEquals(3, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper", "reducer")), Long::sum));
    Assert.assertEquals(1, atm.getNodeCardinality(host2, application1, "mapper"));
    Assert.assertEquals(2, atm.getNodeCardinality(host2, application1, "reducer"));
    // A tag with no containers contributes 0, so min over the set is 0.
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("no_existed", "reducer")), Long::min));
    // A null or empty tag set matches all of the app's tags on the node.
    Assert.assertEquals(2, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, null), Long::max));
    Assert.assertEquals(2, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of()), Long::max));
    // Global scope counts every app's tags on host2 (3 from app1 + 1 from app2).
    Assert.assertEquals(4, atm.getNodeCardinalityByOp(host2, AllocationTags.createGlobalAllocationTags(ImmutableSet.of()), Long::sum));
    Assert.assertEquals(3, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of()), Long::sum));
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application2, ImmutableSet.of()), Long::sum));
    // Remove every container that was added above.
    atm.removeContainer(host1, TestUtils.getMockContainerId(1, 1), ImmutableSet.of("mapper", "reducer"));
    atm.removeContainer(host2, TestUtils.getMockContainerId(1, 2), ImmutableSet.of("mapper", "reducer"));
    atm.removeContainer(host1, TestUtils.getMockContainerId(1, 3), ImmutableSet.of("service"));
    atm.removeContainer(host2, TestUtils.getMockContainerId(1, 4), ImmutableSet.of("reducer"));
    atm.removeContainer(host2, TestUtils.getMockContainerId(2, 3), ImmutableSet.of("service"));
    // All cardinalities must now be zero, regardless of scope or aggregator.
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host1, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper")), Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper", "reducer")), Long::min));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper", "reducer")), Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of("mapper", "reducer")), Long::sum));
    // An app-id-named tag was never added, so it also counts 0.
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of(application1.toString())), Long::max));
    Assert.assertEquals(0, atm.getNodeCardinality(host2, application1, application1.toString()));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of()), Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createGlobalAllocationTags(ImmutableSet.of()), Long::sum));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, AllocationTags.createSingleAppAllocationTags(application1, ImmutableSet.of()), Long::sum));
}
274325.64129hadoop
/**
 * Verifies promotion (OPPORTUNISTIC -&gt; GUARANTEED) and subsequent demotion of a
 * container that has been allocated but whose start has not yet been reported.
 * Covers:
 * <ul>
 *   <li>a promotion request that stays pending until the allocating node itself
 *       heartbeats (a heartbeat from a different NM on the same host does not
 *       complete it),</li>
 *   <li>UPDATE_OUTSTANDING_ERROR on a duplicate promotion request,</li>
 *   <li>INCORRECT_CONTAINER_VERSION_ERROR on a request carrying a stale
 *       container version,</li>
 *   <li>queue-metric deltas across the promote and the later demote.</li>
 * </ul>
 */
public void testContainerPromoteAndDemoteBeforeContainerStart() throws Exception {
    // Four 4GB NMs: two ports on host h1 and two on host h2.
    HashMap<NodeId, MockNM> nodes = new HashMap<>();
    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm1.getNodeId(), nm1);
    MockNM nm2 = new MockNM("h1:4321", 4096, rm.getResourceTrackerService());
    nodes.put(nm2.getNodeId(), nm2);
    MockNM nm3 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm3.getNodeId(), nm3);
    MockNM nm4 = new MockNM("h2:4321", 4096, rm.getResourceTrackerService());
    nodes.put(nm4.getNodeId(), nm4);
    nm1.registerNode();
    nm2.registerNode();
    nm3.registerNode();
    nm4.registerNode();
    // Heartbeat all nodes so the opportunistic allocator sees them.
    nm1.nodeHeartbeat(oppContainersStatus, true);
    nm2.nodeHeartbeat(oppContainersStatus, true);
    nm3.nodeHeartbeat(oppContainersStatus, true);
    nm4.nodeHeartbeat(oppContainersStatus, true);
    OpportunisticContainerAllocatorAMService amservice = (OpportunisticContainerAllocatorAMService) rm.getApplicationMasterService();
    // Submit a 1GB app and launch its AM on nm2.
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm).withAppName("app").withUser("user").withAcls(null).withQueue("default").withUnmanagedAM(false).build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    ResourceScheduler scheduler = rm.getResourceScheduler();
    nm1.nodeHeartbeat(oppContainersStatus, true);
    nm2.nodeHeartbeat(oppContainersStatus, true);
    nm3.nodeHeartbeat(oppContainersStatus, true);
    nm4.nodeHeartbeat(oppContainersStatus, true);
    // Wait until the allocator tracks all four nodes as candidates.
    GenericTestUtils.waitFor(() -> amservice.getLeastLoadedNodes().size() == 4, 10, 10 * 100);
    QueueMetrics metrics = ((CapacityScheduler) scheduler).getRootQueue().getMetrics();
    // Baseline: only the 1GB AM container is charged against the queue.
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
    // Request two 1GB OPPORTUNISTIC containers.
    AllocateResponse allocateResponse = am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(1 * GB), 2, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true))), null);
    List<Container> allocatedContainers = allocateResponse.getAllocatedContainers();
    Assert.assertEquals(2, allocatedContainers.size());
    Container container = allocatedContainers.get(0);
    MockNM allocNode = nodes.get(container.getNodeId());
    // Find the other NM running on the same host as the allocated container.
    MockNM sameHostDiffNode = null;
    for (NodeId n : nodes.keySet()) {
        if (n.getHost().equals(allocNode.getNodeId().getHost()) && n.getPort() != allocNode.getNodeId().getPort()) {
            sameHostDiffNode = nodes.get(n);
        }
    }
    // Opportunistic containers do not change the guaranteed queue metrics.
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
    // Ask to promote the container (version 0).
    am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED)));
    // A heartbeat from a DIFFERENT node on the same host must not complete
    // the promotion: no updated containers are reported yet.
    sameHostDiffNode.nodeHeartbeat(oppContainersStatus, true);
    rm.drainEvents();
    allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    dispatcher.waitForEventThreadToWait();
    rm.drainEvents();
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
    // A second promotion request while the first is pending is rejected.
    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED)));
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
    Assert.assertEquals("UPDATE_OUTSTANDING_ERROR", allocateResponse.getUpdateErrors().get(0).getReason());
    Assert.assertEquals(container.getId(), allocateResponse.getUpdateErrors().get(0).getUpdateContainerRequest().getContainerId());
    // A request with the wrong container version (1 instead of 0) is also
    // rejected, and the current version is reported back.
    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(1, container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED)));
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
    Assert.assertEquals("INCORRECT_CONTAINER_VERSION_ERROR", allocateResponse.getUpdateErrors().get(0).getReason());
    Assert.assertEquals(0, allocateResponse.getUpdateErrors().get(0).getCurrentContainerVersion());
    Assert.assertEquals(container.getId(), allocateResponse.getUpdateErrors().get(0).getUpdateContainerRequest().getContainerId());
    // A heartbeat from the allocating node itself completes the promotion.
    allocNode.nodeHeartbeat(oppContainersStatus, true);
    rm.drainEvents();
    allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    Container uc = allocateResponse.getUpdatedContainers().get(0).getContainer();
    // Promoted container: GUARANTEED, same id, version bumped by one.
    Assert.assertEquals(ExecutionType.GUARANTEED, uc.getExecutionType());
    Assert.assertEquals(uc.getId(), container.getId());
    Assert.assertEquals(uc.getVersion(), container.getVersion() + 1);
    // Promotion now charges the queue: 2GB / 2 containers in use.
    verifyMetrics(metrics, 14336, 14, 2048, 2, 2);
    nm1.nodeHeartbeat(oppContainersStatus, true);
    nm2.nodeHeartbeat(oppContainersStatus, true);
    nm3.nodeHeartbeat(oppContainersStatus, true);
    nm4.nodeHeartbeat(oppContainersStatus, true);
    rm.drainEvents();
    // The container was never started, so the RMContainer is still ACQUIRED.
    RMContainer rmContainer = ((CapacityScheduler) scheduler).getApplicationAttempt(uc.getId().getApplicationAttemptId()).getRMContainer(uc.getId());
    Assert.assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
    // Demote back to OPPORTUNISTIC; demotion completes immediately.
    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(uc.getVersion(), uc.getId(), ContainerUpdateType.DEMOTE_EXECUTION_TYPE, null, ExecutionType.OPPORTUNISTIC)));
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    uc = allocateResponse.getUpdatedContainers().get(0).getContainer();
    Assert.assertEquals(ExecutionType.OPPORTUNISTIC, uc.getExecutionType());
    Assert.assertEquals(uc.getId(), container.getId());
    // Version has been bumped twice from the original (promote + demote).
    Assert.assertEquals(uc.getVersion(), container.getVersion() + 2);
    dispatcher.waitForEventThreadToWait();
    rm.drainEvents();
    // Demotion releases the guaranteed resources back to the baseline.
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
}
273621.026134hadoop
/**
 * Tests RMServerUtils.getApplicableNodeCountForAM for AM resource requests
 * that combine locality (ANY / rack / specific node, with relaxLocality
 * toggled per request) with node-label expressions.
 *
 * Node topology built below:
 * - rack1 holds host0..host28 plus node2 (30 nodes);
 * - 80 nodes carry no label: rack1Nodes[0..18], node2, and host29..host88;
 * - "label1" is on node1, host89..host92, rack1Nodes[19..28], plus two
 *   port-0 entries (host101/host102) which, judging by the expected counts
 *   below, do not contribute to the applicable-node totals.
 */
public void testGetApplicableNodeCountForAMLocalityAndLabels() throws Exception {
    // 30 nodes in rack1: host0..host28 plus node2 (added further down).
    List<NodeId> rack1Nodes = new ArrayList<>();
    for (int i = 0; i < 29; i++) {
        rack1Nodes.add(NodeId.newInstance("host" + i, 1234));
    }
    NodeId node1 = NodeId.newInstance("node1", 1234);
    NodeId node2 = NodeId.newInstance("node2", 1234);
    rack1Nodes.add(node2);
    // 80 unlabeled nodes: first 19 rack1 nodes, node2, and host29..host88.
    Set<NodeId> noLabelNodes = new HashSet<>();
    for (int i = 0; i < 19; i++) {
        noLabelNodes.add(rack1Nodes.get(i));
    }
    noLabelNodes.add(node2);
    for (int i = 29; i < 89; i++) {
        noLabelNodes.add(NodeId.newInstance("host" + i, 1234));
    }
    // "label1" nodes: node1, host89..host92, rack1Nodes[19..28], and two
    // port-0 entries.
    Set<NodeId> label1Nodes = new HashSet<>();
    label1Nodes.add(node1);
    for (int i = 89; i < 93; i++) {
        label1Nodes.add(NodeId.newInstance("host" + i, 1234));
    }
    for (int i = 19; i < 29; i++) {
        label1Nodes.add(rack1Nodes.get(i));
    }
    label1Nodes.add(NodeId.newInstance("host101", 0));
    label1Nodes.add(NodeId.newInstance("host102", 0));
    Map<String, Set<NodeId>> label1NodesMap = new HashMap<>();
    label1NodesMap.put("label1", label1Nodes);
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
    // Mock scheduler/label-manager so the utility sees the topology above.
    ResourceScheduler scheduler = Mockito.mock(ResourceScheduler.class);
    Mockito.when(scheduler.getNumClusterNodes()).thenReturn(100);
    Mockito.when(scheduler.getNodeIds("/rack1")).thenReturn(rack1Nodes);
    Mockito.when(scheduler.getNodeIds("node1")).thenReturn(Collections.singletonList(node1));
    Mockito.when(scheduler.getNodeIds("node2")).thenReturn(Collections.singletonList(node2));
    RMContext rmContext = Mockito.mock(RMContext.class);
    Mockito.when(rmContext.getScheduler()).thenReturn(scheduler);
    RMNodeLabelsManager labMan = Mockito.mock(RMNodeLabelsManager.class);
    Mockito.when(labMan.getNodesWithoutALabel()).thenReturn(noLabelNodes);
    Mockito.when(labMan.getLabelsToNodes(Collections.singleton("label1"))).thenReturn(label1NodesMap);
    Mockito.when(rmContext.getNodeLabelManager()).thenReturn(labMan);
    // ----- Phase 1: no label expression (counts drawn from the 80
    // unlabeled nodes and their intersections with rack/node requests).
    ResourceRequest anyReq = createResourceRequest(ResourceRequest.ANY, true, null);
    List<ResourceRequest> reqs = new ArrayList<>();
    reqs.add(anyReq);
    // ANY relaxed: all 80 unlabeled nodes.
    Assert.assertEquals(80, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    ResourceRequest rackReq = createResourceRequest("/rack1", true, null);
    reqs.add(rackReq);
    // Rack relaxed narrows to unlabeled rack1 nodes (19 hosts + node2).
    Assert.assertEquals(20, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    anyReq.setRelaxLocality(false);
    Assert.assertEquals(20, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    // With no relaxed locality left, falls back to all 80 unlabeled nodes.
    Assert.assertEquals(80, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    ResourceRequest node1Req = createResourceRequest("node1", false, null);
    reqs.add(node1Req);
    Assert.assertEquals(80, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(true);
    // node1 relaxed but node1 is labeled, so no unlabeled node matches.
    Assert.assertEquals(0, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(true);
    Assert.assertEquals(20, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    ResourceRequest node2Req = createResourceRequest("node2", false, null);
    reqs.add(node2Req);
    Assert.assertEquals(20, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(true);
    Assert.assertEquals(20, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    // Only node2 (unlabeled) remains applicable.
    Assert.assertEquals(1, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(false);
    Assert.assertEquals(1, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(false);
    Assert.assertEquals(80, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    // ----- Phase 2: every request now targets "label1"; counts are drawn
    // from the label1 node set instead of the unlabeled one.
    anyReq.setNodeLabelExpression("label1");
    rackReq.setNodeLabelExpression("label1");
    node1Req.setNodeLabelExpression("label1");
    node2Req.setNodeLabelExpression("label1");
    anyReq.setRelaxLocality(true);
    reqs = new ArrayList<>();
    reqs.add(anyReq);
    // ANY relaxed with label1: 15 applicable label1 nodes.
    Assert.assertEquals(15, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(true);
    reqs.add(rackReq);
    // Rack relaxed: the 10 label1 nodes that are in rack1.
    Assert.assertEquals(10, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    anyReq.setRelaxLocality(false);
    Assert.assertEquals(10, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(15, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(false);
    reqs.add(node1Req);
    Assert.assertEquals(15, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(true);
    // node1 relaxed: node1 is the single labeled match.
    Assert.assertEquals(1, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(true);
    // node1 plus the 10 labeled rack1 nodes.
    Assert.assertEquals(11, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(false);
    reqs.add(node2Req);
    Assert.assertEquals(11, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(true);
    Assert.assertEquals(11, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    rackReq.setRelaxLocality(false);
    Assert.assertEquals(1, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node1Req.setRelaxLocality(false);
    // node2 relaxed but node2 is unlabeled: nothing matches label1.
    Assert.assertEquals(0, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
    node2Req.setRelaxLocality(false);
    Assert.assertEquals(15, RMServerUtils.getApplicableNodeCountForAM(rmContext, conf, reqs));
}
272969.0314127hadoop
/**
 * Exercises the {@code metricfilters} query parameter of the /entities REST
 * endpoint: relational expressions (lt/eq/gt/ge/le/ne/ene) combined with
 * AND/OR, optionally together with {@code fields}, {@code metricstoretrieve}
 * and {@code metricslimit}, verifying both the entities matched and the
 * metrics returned for each query.
 */
public void testGetEntitiesMetricFilters() throws Exception {
    Client client = createClient();
    try {
        // Common prefix shared by every query issued by this test.
        String base = "http://localhost:" + getServerPort() + "/ws/v2/"
            + "timeline/clusters/cluster1/apps/application_1111111111_1111/"
            + "entities/type1?";
        // Compound (AND within OR) filter reused by several queries below.
        String orOfAnds = "(HDFS_BYTES_READ%20lt%2060%20AND%20"
            + "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge"
            + "%20140%20AND%20MAP11_SLOT_MILLIS%20le%20122)";

        // lt OR eq: both entities satisfy the disjunction.
        Set<TimelineEntity> entities = retrieveEntities(client,
            base + "metricfilters=HDFS_BYTES_READ%20lt%2060%20OR%20"
                + "HDFS_BYTES_READ%20eq%20157");
        assertEquals(2, entities.size());
        for (TimelineEntity entity : entities) {
            assertTrue(entity.getId().equals("entity1")
                || entity.getId().equals("entity2"));
        }

        // lt AND gt: no entity satisfies both conjuncts.
        entities = retrieveEntities(client,
            base + "metricfilters=HDFS_BYTES_READ%20lt%2060%20AND%20"
                + "MAP_SLOT_MILLIS%20gt%2040");
        assertEquals(0, entities.size());

        // Compound filter without a fields parameter: entity2 matches but
        // no metrics are returned.
        entities = retrieveEntities(client, base + "metricfilters=" + orOfAnds);
        assertEquals(1, entities.size());
        int metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
            assertEquals("entity2", entity.getId());
        }
        assertEquals(0, metricCnt);

        // Same filter with fields=METRICS: all 3 metrics come back.
        entities = retrieveEntities(client,
            base + "metricfilters=" + orOfAnds + "&fields=METRICS");
        assertEquals(1, entities.size());
        metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
            assertEquals("entity2", entity.getId());
        }
        assertEquals(3, metricCnt);

        // metricstoretrieve=!(HDFS) excludes HDFS-prefixed metrics; the two
        // remaining MAP1* metrics are returned as single values.
        entities = retrieveEntities(client,
            base + "metricfilters=" + orOfAnds + "&metricstoretrieve="
                + "!(HDFS)");
        assertEquals(1, entities.size());
        metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
            assertEquals("entity2", entity.getId());
            for (TimelineMetric metric : entity.getMetrics()) {
                assertTrue(metric.getId().startsWith("MAP1"));
                assertEquals(TimelineMetric.Type.SINGLE_VALUE, metric.getType());
            }
        }
        assertEquals(2, metricCnt);

        // Adding metricslimit=10 turns MAP1_SLOT_MILLIS into a 2-point time
        // series while MAP11_SLOT_MILLIS stays a single value.
        entities = retrieveEntities(client,
            base + "metricfilters=" + orOfAnds + "&metricstoretrieve="
                + "!(HDFS)&metricslimit=10");
        assertEquals(1, entities.size());
        metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
            assertEquals("entity2", entity.getId());
            for (TimelineMetric metric : entity.getMetrics()) {
                assertTrue(metric.getId().startsWith("MAP1"));
                if (metric.getId().equals("MAP1_SLOT_MILLIS")) {
                    assertEquals(2, metric.getValues().size());
                    assertEquals(TimelineMetric.Type.TIME_SERIES, metric.getType());
                } else if (metric.getId().equals("MAP11_SLOT_MILLIS")) {
                    assertEquals(TimelineMetric.Type.SINGLE_VALUE, metric.getType());
                } else {
                    fail("Unexpected metric id");
                }
            }
        }
        assertEquals(2, metricCnt);

        // "ne": both entities match (entity1 included as well).
        entities = retrieveEntities(client,
            base + "metricfilters=MAP11_SLOT_MILLIS%20ne%20100");
        assertEquals(2, entities.size());
        for (TimelineEntity entity : entities) {
            assertTrue(entity.getId().equals("entity1")
                || entity.getId().equals("entity2"));
        }

        // "ene": stricter than "ne" — only entity2 matches here.
        entities = retrieveEntities(client,
            base + "metricfilters=MAP11_SLOT_MILLIS%20ene%20100");
        assertEquals(1, entities.size());
        for (TimelineEntity entity : entities) {
            assertEquals("entity2", entity.getId());
        }
    } finally {
        client.destroy();
    }
}

/** Issues a GET for {@code url} and returns the parsed, non-null entity set. */
private Set<TimelineEntity> retrieveEntities(Client client, String url)
    throws Exception {
    ClientResponse resp = getResponse(client, URI.create(url));
    Set<TimelineEntity> result =
        resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
    assertNotNull(result);
    return result;
}
273707.086132hadoop
/**
 * Writes two flow runs (one application entity each, same flow name, runs
 * 1002345678919 and 1002345678918) to HBase, then reads YARN_FLOW_RUN
 * entities back through HBaseTimelineReaderImpl with various metric filter
 * lists, checking the number of entities matched and — where METRICS is
 * retrieved — the number of metrics returned.
 */
public void testMetricFilters() throws Exception {
    String cluster = "cluster1";
    String user = "user1";
    String flow = "flow_name1";
    // First flow run: application_..._1111 with the "app1" metric set.
    TimelineEntities te = new TimelineEntities();
    TimelineEntity entityApp1 = TestFlowDataGenerator.getEntityMetricsApp1(System.currentTimeMillis());
    te.addEntity(entityApp1);
    HBaseTimelineWriterImpl hbi = null;
    Configuration c1 = util.getConfiguration();
    try {
        hbi = new HBaseTimelineWriterImpl();
        hbi.init(c1);
        UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(user);
        hbi.write(new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354", 1002345678919L, "application_11111111111111_1111"), te, remoteUser);
        // Second flow run: application_..._2222 with the "app2" metric set.
        te = new TimelineEntities();
        TimelineEntity entityApp2 = TestFlowDataGenerator.getEntityMetricsApp2(System.currentTimeMillis());
        te.addEntity(entityApp2);
        hbi.write(new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354", 1002345678918L, "application_11111111111111_2222"), te, remoteUser);
        hbi.flush();
    } finally {
        // Always release the writer, even if a write failed.
        if (hbi != null) {
            hbi.close();
        }
    }
    HBaseTimelineReaderImpl hbr = null;
    try {
        hbr = new HBaseTimelineReaderImpl();
        hbr.init(c1);
        hbr.start();
        // (METRIC1 >= 101) OR (METRIC1 < 43 AND METRIC2 == 57): both flow
        // runs match; 3 metrics returned in total.
        TimelineFilterList list1 = new TimelineFilterList();
        list1.addFilter(new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, METRIC1, 101));
        TimelineFilterList list2 = new TimelineFilterList();
        list2.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, METRIC1, 43));
        list2.addFilter(new TimelineCompareFilter(TimelineCompareOp.EQUAL, METRIC2, 57));
        TimelineFilterList metricFilterList = new TimelineFilterList(Operator.OR, list1, list2);
        Set<TimelineEntity> entities = hbr.getEntities(new TimelineReaderContext(cluster, user, flow, null, null, TimelineEntityType.YARN_FLOW_RUN.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
        assertEquals(2, entities.size());
        int metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
        }
        assertEquals(3, metricCnt);
        // (METRIC1 <= 127) AND (METRIC2 != 30): one flow run, 2 metrics.
        TimelineFilterList metricFilterList1 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, METRIC1, 127), new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, METRIC2, 30));
        entities = hbr.getEntities(new TimelineReaderContext(cluster, user, flow, null, null, TimelineEntityType.YARN_FLOW_RUN.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList1).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
        assertEquals(1, entities.size());
        metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
        }
        assertEquals(2, metricCnt);
        // (METRIC1 < 32) AND (METRIC2 != 57): no flow run matches.
        TimelineFilterList metricFilterList2 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, METRIC1, 32), new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, METRIC2, 57));
        entities = hbr.getEntities(new TimelineReaderContext(cluster, user, flow, null, null, TimelineEntityType.YARN_FLOW_RUN.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList2).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
        assertEquals(0, entities.size());
        // Filter on a metric id that was never written: no match.
        TimelineFilterList metricFilterList3 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.EQUAL, "s_metric", 32));
        entities = hbr.getEntities(new TimelineReaderContext(cluster, user, flow, null, null, TimelineEntityType.YARN_FLOW_RUN.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList3).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
        assertEquals(0, entities.size());
        // Same OR filter as the first query, but metricsToRetrieve limits
        // the returned metrics to the METRIC2 prefix: 2 entities, 1 metric.
        TimelineFilterList list3 = new TimelineFilterList();
        list3.addFilter(new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, METRIC1, 101));
        TimelineFilterList list4 = new TimelineFilterList();
        list4.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, METRIC1, 43));
        list4.addFilter(new TimelineCompareFilter(TimelineCompareOp.EQUAL, METRIC2, 57));
        TimelineFilterList metricFilterList4 = new TimelineFilterList(Operator.OR, list3, list4);
        TimelineFilterList metricsToRetrieve = new TimelineFilterList(Operator.OR, new TimelinePrefixFilter(TimelineCompareOp.EQUAL, METRIC2.substring(0, METRIC2.indexOf("_") + 1)));
        entities = hbr.getEntities(new TimelineReaderContext(cluster, user, flow, null, null, TimelineEntityType.YARN_FLOW_RUN.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList4).build(), new TimelineDataToRetrieve(null, metricsToRetrieve, EnumSet.of(Field.ALL), null, null, null));
        assertEquals(2, entities.size());
        metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
        }
        assertEquals(1, metricCnt);
    } finally {
        // Always release the reader.
        if (hbr != null) {
            hbr.close();
        }
    }
}
273909.361150kafka
/**
 * A static consumer-group member that leaves with member epoch -2
 * (apparently the temporary/static-leave sentinel — the group keeps its
 * state) and then rejoins with a NEW member id but the SAME instance id
 * must get back exactly its previous partition assignment, with the old
 * member id's records tombstoned and replaced by records for the new id.
 * No session/rebalance timeouts should remain scheduled for the old id.
 */
public void testStaticMemberGetsBackAssignmentUponRejoin() {
    String groupId = "fooup";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    // Fresh member id used when the static member rejoins.
    String member2RejoinId = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Two STABLE static members at epoch 10; member2 owns foo[3-5], bar[2].
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setInstanceId(memberId1).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setInstanceId(memberId2).setMemberEpoch(10).setPreviousMemberEpoch(9).setRebalanceTimeoutMs(5000).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build();
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 6).addTopic(barTopicId, barTopicName, 3).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withMember(member1).withMember(member2).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).withAssignmentEpoch(10).withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
        }
    })).build();
    // The assignor keys member2's old partitions on the REJOIN id, so the
    // rejoining member is handed back the same partitions.
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))));
            put(member2RejoinId, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))));
        }
    }));
    // member2 leaves with epoch -2; the response echoes the -2 epoch.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setInstanceId(memberId2).setMemberEpoch(-2).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setTopicPartitions(Collections.emptyList()));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId2).setMemberEpoch(-2), result.response());
    // Leaving only rewrites member2's current-assignment record with the
    // -2 epoch; nothing else changes.
    ConsumerGroupMember member2UpdatedEpoch = new ConsumerGroupMember.Builder(member2).setMemberEpoch(-2).build();
    assertEquals(1, result.records().size());
    assertRecordEquals(result.records().get(0), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, member2UpdatedEpoch));
    // Rejoin: new member id, same instance id, epoch 0.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> rejoinResult = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setMemberId(member2RejoinId).setGroupId(groupId).setInstanceId(memberId2).setMemberEpoch(0).setRebalanceTimeoutMs(5000).setServerAssignor("range").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setTopicPartitions(Collections.emptyList()));
    // The rejoining member is immediately at group epoch 10 with its old
    // assignment (foo 3,4,5 / bar 2).
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(member2RejoinId).setMemberEpoch(10).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Arrays.asList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(3, 4, 5)), new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(barTopicId).setPartitions(Collections.singletonList(2))))), rejoinResult.response());
    ConsumerGroupMember expectedRejoinedMember = new ConsumerGroupMember.Builder(member2RejoinId).setState(MemberState.STABLE).setMemberEpoch(10).setInstanceId(memberId2).setPreviousMemberEpoch(0).setClientId("client").setClientHost("localhost/127.0.0.1").setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build();
    // Expected record sequence: tombstones for the old member id, then
    // subscription/target/current-assignment records for the new id.
    List<CoordinatorRecord> expectedRecordsAfterRejoin = Arrays.asList(CoordinatorRecordHelpers.newCurrentAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newTargetAssignmentTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newMemberSubscriptionTombstoneRecord(groupId, memberId2), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedRejoinedMember), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, member2RejoinId, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 10), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedRejoinedMember));
    assertRecordsEquals(expectedRecordsAfterRejoin, rejoinResult.records());
    // No timers may remain scheduled under the departed member id.
    context.assertNoSessionTimeout(groupId, memberId2);
    context.assertNoRebalanceTimeout(groupId, memberId2);
}
273785.911145kafka
/**
 * A consumer-group heartbeat from a new member against a STABLE classic
 * group (with the UPGRADE migration policy) must convert the classic group
 * to a consumer group: the classic group-metadata record is tombstoned,
 * the existing classic member is re-created as a consumer-group member at
 * epoch 0, and the new member joins at epoch 1 in UNRELEASED_PARTITIONS
 * state. A rollback must restore the original classic group.
 *
 * Fixes over the previous revision: {@code Optional.of} instead of
 * {@code Optional.ofNullable} for the non-null protocol name, and the final
 * assertion uses the {@code groupId} variable instead of repeating the
 * literal.
 */
public void testConsumerGroupHeartbeatWithStableClassicGroup() {
    String groupId = "group-id";
    String memberId1 = "member-id-1";
    String memberId2 = "member-id-2";
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Post-conversion target assignment: member1 -> foo-0, member2 -> bar-0.
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(barTopicId, 0))));
        }
    }));
    MetadataImage metadataImage = new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 1).addTopic(barTopicId, barTopicName, 1).addRacks().build();
    // UPGRADE policy allows the classic -> consumer group conversion.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withConsumerGroupMigrationPolicy(ConsumerGroupMigrationPolicy.UPGRADE).withAssignors(Collections.singletonList(assignor)).withMetadataImage(metadataImage).build();
    JoinGroupRequestData.JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestData.JoinGroupRequestProtocolCollection(1);
    protocols.add(new JoinGroupRequestData.JoinGroupRequestProtocol().setName("range").setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(Arrays.asList(fooTopicName, barTopicName), null, Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0)))))));
    // Classic member1 currently owns foo-0 and bar-0.
    Map<String, byte[]> assignments = new HashMap<String, byte[]>() {

        {
            put(memberId1, Utils.toArray(ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(barTopicName, 0))))));
        }
    };
    // Build a STABLE classic group containing member1 and persist it.
    ClassicGroup group = context.createClassicGroup(groupId);
    // "range" is a non-null constant, so Optional.of is the correct factory.
    group.setProtocolName(Optional.of("range"));
    group.add(new ClassicGroupMember(memberId1, Optional.empty(), "client-id", "client-host", 10000, 5000, "consumer", protocols, assignments.get(memberId1)));
    group.transitionTo(PREPARING_REBALANCE);
    group.transitionTo(COMPLETING_REBALANCE);
    group.transitionTo(STABLE);
    context.replay(CoordinatorRecordHelpers.newGroupMetadataRecord(group, assignments, metadataImage.features().metadataVersion()));
    context.commit();
    group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false);
    // member2 heartbeats with the consumer protocol, triggering the upgrade.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setRebalanceTimeoutMs(5000).setServerAssignor("range").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setTopicPartitions(Collections.emptyList()));
    // member1 is carried over at epoch 0 with its classic metadata attached.
    ConsumerGroupMember expectedMember1 = new ConsumerGroupMember.Builder(memberId1).setMemberEpoch(0).setPreviousMemberEpoch(0).setClientId("client-id").setClientHost("client-host").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(10000).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(protocols))).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))).build();
    // member2 joins at epoch 1 and must wait for unreleased partitions.
    ConsumerGroupMember expectedMember2 = new ConsumerGroupMember.Builder(memberId2).setMemberEpoch(1).setPreviousMemberEpoch(0).setState(MemberState.UNRELEASED_PARTITIONS).setClientId("client").setClientHost("localhost/127.0.0.1").setServerAssignorName("range").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(5000).setAssignedPartitions(Collections.emptyMap()).build();
    // Expected record sequence: classic group-metadata tombstone, member1
    // recreated at epoch 0, then member2's join and the epoch-1 assignment.
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newGroupMetadataTombstoneRecord(groupId), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 0), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, expectedMember1.assignedPartitions()), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 0), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember1), CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember2), CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 1, new HashMap<Integer, Set<String>>() {

                {
                    put(0, new HashSet<>(Arrays.asList("rack0", "rack1")));
                }
            }));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, new HashMap<Integer, Set<String>>() {

                {
                    put(0, new HashSet<>(Arrays.asList("rack0", "rack1")));
                }
            }));
        }
    }), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 1), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, assignor.targetPartitions(memberId2)), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, assignor.targetPartitions(memberId1)), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 1), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember2));
    assertRecordsEquals(expectedRecords, result.records());
    context.assertSessionTimeout(groupId, memberId1, 45000);
    context.assertSessionTimeout(groupId, memberId2, 45000);
    // Rolling back the uncommitted conversion restores the classic group.
    context.rollback();
    assertEquals(group, context.groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, false));
}
273871.021144kafka
// Verifies the classic-protocol JoinGroup path for an existing STATIC member of a
// consumer group whose subscription grows from (foo, bar) to (foo, bar, zar):
// the join must bump the group epoch 10 -> 11, produce a full target reassignment
// for both members, refresh the member's stored classic protocol metadata, and
// schedule session/sync timeouts from the join request.
public void testJoiningConsumerGroupWithExistingStaticMemberAndNewSubscription() throws Exception {
    String groupId = "group-id";
    // Three topics; "zar" is the one added by the rejoin.
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    Uuid zarTopicId = Uuid.randomUuid();
    String zarTopicName = "zar";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    String instanceId = "instance-id";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Initial state at epoch 10: member1 is the static classic-protocol member owning
    // foo-0 and bar-0; member2 is a regular member owning foo-1. Both are STABLE.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 2).addTopic(barTopicId, barTopicName, 1).addTopic(zarTopicId, zarTopicName, 1).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, mkMapOfPartitionRacks(1)));
            put(zarTopicName, new TopicMetadata(zarTopicId, zarTopicName, 1, mkMapOfPartitionRacks(1)));
        }
    }).withMember(new ConsumerGroupMember.Builder(memberId1).setInstanceId(instanceId).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(5000).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName), Arrays.asList(new TopicPartition(fooTopicName, 0), new TopicPartition(fooTopicName, 1)))))).build()).withMember(new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 1))).build()).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(barTopicId, 0))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 1))).withAssignmentEpoch(10)).build();
    ConsumerGroup group = context.groupMetadataManager.consumerGroup(groupId);
    // Push the metadata refresh deadline to the far future so the refresh timer
    // cannot fire during the test; the join itself must drive the epoch bump.
    group.setMetadataRefreshDeadline(Long.MAX_VALUE, 11);
    // The mock assignor returns this assignment when the rebalance is computed:
    // member1 gets foo-0 + zar-0, member2 gets bar-0 + foo-1.
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(zarTopicId, 0))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(barTopicId, 0), mkTopicAssignment(fooTopicId, 1))));
        }
    }));
    // member1 rejoins via the classic protocol with the enlarged subscription.
    JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId(groupId).withMemberId(memberId1).withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Collections.emptyList())).build();
    GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(request);
    // Expected post-join view of member1: epoch 11, new subscription and assignment,
    // and classic metadata rebuilt from the join request's protocols/session timeout.
    ConsumerGroupMember expectedMember = new ConsumerGroupMember.Builder(memberId1).setInstanceId(instanceId).setMemberEpoch(11).setPreviousMemberEpoch(10).setRebalanceTimeoutMs(500).setClientId("client").setClientHost("localhost/127.0.0.1").setState(MemberState.STABLE).setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName, zarTopicName)).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(zarTopicId, 0))).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(request.sessionTimeoutMs()).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName, zarTopicName), Collections.emptyList())))).build();
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 11), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0), mkTopicAssignment(zarTopicId, 0))), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, mkAssignment(mkTopicAssignment(barTopicId, 0), mkTopicAssignment(fooTopicId, 1))), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 11), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember));
    // Records [0,2) (subscription + epoch) and [4,6) (assignment epoch + current
    // assignment) are order-sensitive; the two per-member target-assignment records
    // in [2,4) may appear in either order.
    assertRecordsEquals(expectedRecords.subList(0, 2), joinResult.records.subList(0, 2));
    assertUnorderedListEquals(expectedRecords.subList(2, 4), joinResult.records.subList(2, 4));
    assertRecordsEquals(expectedRecords.subList(4, 6), joinResult.records.subList(4, 6));
    // Completing the append releases the deferred JoinGroup response.
    joinResult.appendFuture.complete(null);
    assertEquals(new JoinGroupResponseData().setMemberId(memberId1).setGenerationId(11).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setProtocolName("range"), joinResult.joinFuture.get());
    // Timeouts must now track the values carried by the join request.
    context.assertSessionTimeout(groupId, memberId1, request.sessionTimeoutMs());
    context.assertSyncTimeout(groupId, memberId1, request.rebalanceTimeoutMs());
}
273648.7421115wildfly
/**
 * Marshals the Infinispan subsystem model to XML under the current schema namespace.
 * Emits one {@code cache-container} element per embedded container and one
 * {@code remote-cache-container} element per HotRod client container, then closes
 * the subsystem element.
 *
 * @param writer  the stream writer to emit XML to
 * @param context marshalling context providing the subsystem {@link ModelNode}
 * @throws XMLStreamException on any underlying writer failure
 */
public void writeContent(XMLExtendedStreamWriter writer, SubsystemMarshallingContext context) throws XMLStreamException {
    context.startSubsystemElement(InfinispanSubsystemSchema.CURRENT.getNamespace().getUri(), false);
    ModelNode model = context.getModelNode();
    if (model.isDefined()) {
        if (model.hasDefined(CacheContainerResourceDefinition.WILDCARD_PATH.getKey())) {
            for (Property entry : model.get(CacheContainerResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
                this.writeEmbeddedCacheContainerElement(writer, entry.getName(), entry.getValue());
            }
        }
        if (model.hasDefined(RemoteCacheContainerResourceDefinition.WILDCARD_PATH.getKey())) {
            for (Property entry : model.get(RemoteCacheContainerResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
                this.writeRemoteCacheContainerElement(writer, entry.getName(), entry.getValue());
            }
        }
    }
    // Closes the subsystem element opened by startSubsystemElement(..., false).
    writer.writeEndElement();
}

/**
 * Writes a single {@code cache-container} element: container attributes, the
 * optional JGroups transport, optional thread pools, and every defined cache,
 * grouped by cache type in schema order (local, invalidation, replicated,
 * distributed, scattered).
 */
private void writeEmbeddedCacheContainerElement(XMLExtendedStreamWriter writer, String containerName, ModelNode container) throws XMLStreamException {
    writer.writeStartElement(XMLElement.CACHE_CONTAINER.getLocalName());
    writer.writeAttribute(XMLAttribute.NAME.getLocalName(), containerName);
    writeAttributes(writer, container, CacheContainerResourceDefinition.Attribute.class);
    writeAttributes(writer, container, CacheContainerResourceDefinition.ListAttribute.class);
    if (container.hasDefined(JGroupsTransportResourceDefinition.PATH.getKeyValuePair())) {
        writer.writeStartElement(XMLElement.TRANSPORT.getLocalName());
        ModelNode transport = container.get(JGroupsTransportResourceDefinition.PATH.getKeyValuePair());
        writeAttributes(writer, transport, EnumSet.allOf(JGroupsTransportResourceDefinition.Attribute.class));
        writer.writeEndElement();
    }
    if (container.hasDefined(ThreadPoolResourceDefinition.WILDCARD_PATH.getKey())) {
        writeThreadPoolElements(XMLElement.BLOCKING_THREAD_POOL, ThreadPoolResourceDefinition.BLOCKING, writer, container);
        writeThreadPoolElements(XMLElement.LISTENER_THREAD_POOL, ThreadPoolResourceDefinition.LISTENER, writer, container);
        writeThreadPoolElements(XMLElement.NON_BLOCKING_THREAD_POOL, ThreadPoolResourceDefinition.NON_BLOCKING, writer, container);
        writeScheduledThreadPoolElements(XMLElement.EXPIRATION_THREAD_POOL, ScheduledThreadPoolResourceDefinition.EXPIRATION, writer, container);
    }
    if (container.hasDefined(LocalCacheResourceDefinition.WILDCARD_PATH.getKey())) {
        for (Property property : container.get(LocalCacheResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
            ModelNode cache = property.getValue();
            writer.writeStartElement(XMLElement.LOCAL_CACHE.getLocalName());
            writeCacheAttributes(writer, property.getName(), cache);
            writeCacheElements(writer, cache);
            writer.writeEndElement();
        }
    }
    if (container.hasDefined(InvalidationCacheResourceDefinition.WILDCARD_PATH.getKey())) {
        for (Property property : container.get(InvalidationCacheResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
            ModelNode cache = property.getValue();
            writer.writeStartElement(XMLElement.INVALIDATION_CACHE.getLocalName());
            writeClusteredCacheAttributes(writer, property.getName(), cache);
            writeCacheElements(writer, cache);
            writer.writeEndElement();
        }
    }
    if (container.hasDefined(ReplicatedCacheResourceDefinition.WILDCARD_PATH.getKey())) {
        for (Property property : container.get(ReplicatedCacheResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
            ModelNode cache = property.getValue();
            writer.writeStartElement(XMLElement.REPLICATED_CACHE.getLocalName());
            writeClusteredCacheAttributes(writer, property.getName(), cache);
            writeCacheElements(writer, cache);
            writer.writeEndElement();
        }
    }
    if (container.hasDefined(DistributedCacheResourceDefinition.WILDCARD_PATH.getKey())) {
        for (Property property : container.get(DistributedCacheResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
            ModelNode cache = property.getValue();
            writer.writeStartElement(XMLElement.DISTRIBUTED_CACHE.getLocalName());
            writeSegmentedCacheAttributes(writer, property.getName(), cache);
            // Distributed caches carry extra attributes beyond the segmented-cache set.
            writeAttributes(writer, cache, EnumSet.allOf(DistributedCacheResourceDefinition.Attribute.class));
            writeCacheElements(writer, cache);
            writer.writeEndElement();
        }
    }
    if (container.hasDefined(ScatteredCacheResourceDefinition.WILDCARD_PATH.getKey())) {
        for (Property property : container.get(ScatteredCacheResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
            ModelNode cache = property.getValue();
            writer.writeStartElement(XMLElement.SCATTERED_CACHE.getLocalName());
            writeSegmentedCacheAttributes(writer, property.getName(), cache);
            writeAttributes(writer, cache, EnumSet.allOf(ScatteredCacheResourceDefinition.Attribute.class));
            writeCacheElements(writer, cache);
            writer.writeEndElement();
        }
    }
    writer.writeEndElement();
}

/**
 * Writes a single {@code remote-cache-container} element: attributes (PROPERTIES
 * is excluded from the attribute pass and written as a nested element instead),
 * the async thread pool, the optional connection pool, the remote clusters, and
 * the optional security element.
 */
private void writeRemoteCacheContainerElement(XMLExtendedStreamWriter writer, String remoteContainerName, ModelNode remoteContainer) throws XMLStreamException {
    writer.writeStartElement(XMLElement.REMOTE_CACHE_CONTAINER.getLocalName());
    writer.writeAttribute(XMLAttribute.NAME.getLocalName(), remoteContainerName);
    writeAttributes(writer, remoteContainer, EnumSet.complementOf(EnumSet.of(RemoteCacheContainerResourceDefinition.Attribute.PROPERTIES)));
    writeAttributes(writer, remoteContainer, RemoteCacheContainerResourceDefinition.ListAttribute.class);
    writeAttributes(writer, remoteContainer, EnumSet.allOf(RemoteCacheContainerResourceDefinition.DeprecatedAttribute.class));
    writeThreadPoolElements(XMLElement.ASYNC_THREAD_POOL, ThreadPoolResourceDefinition.CLIENT, writer, remoteContainer);
    ModelNode connectionPool = remoteContainer.get(ConnectionPoolResourceDefinition.PATH.getKeyValuePair());
    Set<ConnectionPoolResourceDefinition.Attribute> attributes = EnumSet.allOf(ConnectionPoolResourceDefinition.Attribute.class);
    if (hasDefined(connectionPool, attributes)) {
        writer.writeStartElement(XMLElement.CONNECTION_POOL.getLocalName());
        writeAttributes(writer, connectionPool, attributes);
        writer.writeEndElement();
    }
    // NOTE(review): this writes StoreResourceDefinition.Attribute.PROPERTIES against
    // the remote container node; confirm it should not be
    // RemoteCacheContainerResourceDefinition.Attribute.PROPERTIES (the attribute
    // excluded from the attribute pass above).
    writeElement(writer, remoteContainer, StoreResourceDefinition.Attribute.PROPERTIES);
    writer.writeStartElement(XMLElement.REMOTE_CLUSTERS.getLocalName());
    for (Property clusterEntry : remoteContainer.get(RemoteClusterResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
        writer.writeStartElement(XMLElement.REMOTE_CLUSTER.getLocalName());
        String remoteClusterName = clusterEntry.getName();
        ModelNode remoteCluster = clusterEntry.getValue();
        writer.writeAttribute(XMLAttribute.NAME.getLocalName(), remoteClusterName);
        writeAttributes(writer, remoteCluster, RemoteClusterResourceDefinition.Attribute.class);
        writer.writeEndElement();
    }
    writer.writeEndElement();
    ModelNode securityModel = remoteContainer.get(SecurityResourceDefinition.PATH.getKeyValuePair());
    Set<SecurityResourceDefinition.Attribute> securityAttributes = EnumSet.allOf(SecurityResourceDefinition.Attribute.class);
    if (hasDefined(securityModel, securityAttributes)) {
        writer.writeStartElement(XMLElement.SECURITY.getLocalName());
        writeAttributes(writer, securityModel, securityAttributes);
        writer.writeEndElement();
    }
    writer.writeEndElement();
}
275911.8615107wildfly
 /**
  * Builds a {@link ModifiableXaDataSource} from the resolved attributes of an XA
  * datasource management model node.
  *
  * @param operationContext                 context used to resolve attribute expressions
  * @param dataSourceNode                   the xa-data-source model node
  * @param dsName                           the datasource name; also used as the pool name
  * @param credentialSourceSupplier         supplier for the datasource credential, may be null
  * @param recoveryCredentialSourceSupplier supplier for the recovery credential, may be null
  * @return the assembled XA datasource metadata
  * @throws OperationFailedException if a legacy security-domain or
  *         recovery-security-domain attribute is set (legacy security is unsupported)
  * @throws ValidateException if the assembled metadata fails validation
  */
 static ModifiableXaDataSource xaFrom(final OperationContext operationContext, final ModelNode dataSourceNode, final String dsName, final ExceptionSupplier<CredentialSource, Exception> credentialSourceSupplier, final ExceptionSupplier<CredentialSource, Exception> recoveryCredentialSourceSupplier) throws OperationFailedException, ValidateException {
    // XA datasource properties are managed as separate resources, not read here.
    final Map<String, String> xaDataSourceProperty = Collections.emptyMap();
    final String xaDataSourceClass = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, XA_DATASOURCE_CLASS);
    final String jndiName = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, JNDI_NAME);
    final String module = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, DATASOURCE_DRIVER);
    final String newConnectionSql = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, NEW_CONNECTION_SQL);
    // The pool is named after the datasource itself.
    final String poolName = dsName;
    final String urlDelimiter = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, URL_DELIMITER);
    final String urlSelectorStrategyClassName = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, URL_SELECTOR_STRATEGY_CLASS_NAME);
    final Boolean useJavaContext = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, USE_JAVA_CONTEXT);
    final Boolean enabled = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, ENABLED);
    final boolean connectable = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, CONNECTABLE);
    final Boolean tracking = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, TRACKING);
    final Boolean enlistmentTrace = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, ENLISTMENT_TRACE);
    final String mcp = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, MCP);
    // --- Pool configuration ---
    final Integer maxPoolSize = ModelNodeUtil.getIntIfSetOrGetDefault(operationContext, dataSourceNode, MAX_POOL_SIZE);
    final Integer minPoolSize = ModelNodeUtil.getIntIfSetOrGetDefault(operationContext, dataSourceNode, MIN_POOL_SIZE);
    final Integer initialPoolSize = ModelNodeUtil.getIntIfSetOrGetDefault(operationContext, dataSourceNode, INITIAL_POOL_SIZE);
    final Boolean prefill = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, POOL_PREFILL);
    final Boolean fair = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, POOL_FAIR);
    final Boolean useStrictMin = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, POOL_USE_STRICT_MIN);
    final Boolean interleaving = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, INTERLEAVING);
    final Boolean noTxSeparatePool = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, NO_TX_SEPARATE_POOL);
    final Boolean padXid = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, PAD_XID);
    final Boolean isSameRmOverride = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, SAME_RM_OVERRIDE);
    final Boolean wrapXaDataSource = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, WRAP_XA_RESOURCE);
    final String flushStrategyString = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, POOL_FLUSH_STRATEGY);
    final FlushStrategy flushStrategy = FlushStrategy.forName(flushStrategyString);
    final Boolean allowMultipleUsers = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, ALLOW_MULTIPLE_USERS);
    Extension incrementer = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, CAPACITY_INCREMENTER_CLASS, CAPACITY_INCREMENTER_PROPERTIES);
    Extension decrementer = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, CAPACITY_DECREMENTER_CLASS, CAPACITY_DECREMENTER_PROPERTIES);
    final Capacity capacity = new Capacity(incrementer, decrementer);
    final Extension connectionListener = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, CONNECTION_LISTENER_CLASS, CONNECTION_LISTENER_PROPERTIES);
    final DsXaPool xaPool = new DsXaPoolImpl(minPoolSize, initialPoolSize, maxPoolSize, prefill, useStrictMin, flushStrategy, isSameRmOverride, interleaving, padXid, wrapXaDataSource, noTxSeparatePool, allowMultipleUsers, capacity, fair, connectionListener);
    // --- Security configuration ---
    final String username = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, USERNAME);
    final String password = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, PASSWORD);
    final String securityDomain = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, SECURITY_DOMAIN);
    if (securityDomain != null) {
        // Legacy (PicketBox-style) security domains are no longer supported; fail fast.
        throw new OperationFailedException(ConnectorLogger.DS_DEPLOYER_LOGGER.legacySecurityNotSupported());
    }
    final String authenticationContext = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, AUTHENTICATION_CONTEXT);
    final Extension reauthPlugin = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, REAUTH_PLUGIN_CLASSNAME, REAUTHPLUGIN_PROPERTIES);
    final DsSecurity security = new DsSecurityImpl(username, password, authenticationContext, credentialSourceSupplier, reauthPlugin);
    // --- Prepared statement configuration ---
    final boolean sharePreparedStatements = SHARE_PREPARED_STATEMENTS.resolveModelAttribute(operationContext, dataSourceNode).asBoolean();
    final Long preparedStatementsCacheSize = ModelNodeUtil.getLongIfSetOrGetDefault(operationContext, dataSourceNode, PREPARED_STATEMENTS_CACHE_SIZE);
    final String trackStatementsString = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, TRACK_STATEMENTS);
    // Locale.ENGLISH keeps the enum lookup locale-independent (e.g. Turkish dotless i).
    // NOTE(review): assumes TRACK_STATEMENTS always resolves non-null (has a default) — confirm.
    final Statement.TrackStatementsEnum trackStatements = Statement.TrackStatementsEnum.valueOf(trackStatementsString.toUpperCase(Locale.ENGLISH));
    final Statement statement = new StatementImpl(sharePreparedStatements, preparedStatementsCacheSize, trackStatements);
    // --- Timeout configuration ---
    final Integer allocationRetry = ModelNodeUtil.getIntIfSetOrGetDefault(operationContext, dataSourceNode, ALLOCATION_RETRY);
    final Long allocationRetryWaitMillis = ModelNodeUtil.getLongIfSetOrGetDefault(operationContext, dataSourceNode, ALLOCATION_RETRY_WAIT_MILLIS);
    final Long blockingTimeoutMillis = ModelNodeUtil.getLongIfSetOrGetDefault(operationContext, dataSourceNode, BLOCKING_TIMEOUT_WAIT_MILLIS);
    final Long idleTimeoutMinutes = ModelNodeUtil.getLongIfSetOrGetDefault(operationContext, dataSourceNode, IDLETIMEOUTMINUTES);
    final Long queryTimeout = ModelNodeUtil.getLongIfSetOrGetDefault(operationContext, dataSourceNode, QUERY_TIMEOUT);
    final Integer xaResourceTimeout = ModelNodeUtil.getIntIfSetOrGetDefault(operationContext, dataSourceNode, XA_RESOURCE_TIMEOUT);
    final Long useTryLock = ModelNodeUtil.getLongIfSetOrGetDefault(operationContext, dataSourceNode, USE_TRY_LOCK);
    final Boolean setTxQueryTimeout = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, SET_TX_QUERY_TIMEOUT);
    final TimeOut timeOut = new TimeOutImpl(blockingTimeoutMillis, idleTimeoutMinutes, allocationRetry, allocationRetryWaitMillis, xaResourceTimeout, setTxQueryTimeout, queryTimeout, useTryLock);
    // --- Transaction isolation: fall back to a custom level for unknown names ---
    final String transactionIsolationString = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, TRANSACTION_ISOLATION);
    TransactionIsolation transactionIsolation = null;
    if (transactionIsolationString != null) {
        transactionIsolation = TransactionIsolation.forName(transactionIsolationString);
        if (transactionIsolation == null) {
            transactionIsolation = TransactionIsolation.customLevel(transactionIsolationString);
        }
    }
    // --- Validation configuration ---
    final String checkValidConnectionSql = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, CHECK_VALID_CONNECTION_SQL);
    final Extension exceptionSorter = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, EXCEPTION_SORTER_CLASSNAME, EXCEPTION_SORTER_MODULE, EXCEPTION_SORTER_PROPERTIES);
    final Extension staleConnectionChecker = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, STALE_CONNECTION_CHECKER_CLASSNAME, STALE_CONNECTION_CHECKER_MODULE, STALE_CONNECTION_CHECKER_PROPERTIES);
    final Extension validConnectionChecker = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, VALID_CONNECTION_CHECKER_CLASSNAME, VALID_CONNECTION_CHECKER_MODULE, VALID_CONNECTION_CHECKER_PROPERTIES);
    final Long backgroundValidationMillis = ModelNodeUtil.getLongIfSetOrGetDefault(operationContext, dataSourceNode, BACKGROUNDVALIDATIONMILLIS);
    final Boolean backgroundValidation = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, BACKGROUNDVALIDATION);
    final boolean useFastFail = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, USE_FAST_FAIL);
    final Boolean validateOnMatch = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, VALIDATE_ON_MATCH);
    final Boolean spy = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, SPY);
    final Boolean useCcm = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, USE_CCM);
    final Validation validation = new ValidationImpl(backgroundValidation, backgroundValidationMillis, useFastFail, validConnectionChecker, checkValidConnectionSql, validateOnMatch, staleConnectionChecker, exceptionSorter);
    // --- Recovery configuration ---
    final String recoveryUsername = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, RECOVERY_USERNAME);
    final String recoveryPassword = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, RECOVERY_PASSWORD);
    final String recoverySecurityDomain = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, RECOVERY_SECURITY_DOMAIN);
    if (recoverySecurityDomain != null) {
        // Legacy recovery security domains are likewise unsupported.
        throw new OperationFailedException(ConnectorLogger.DS_DEPLOYER_LOGGER.legacySecurityNotSupported());
    }
    final String recoveryAuthenticationContext = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, RECOVERY_AUTHENTICATION_CONTEXT);
    Boolean noRecovery = ModelNodeUtil.getBooleanIfSetOrGetDefault(operationContext, dataSourceNode, NO_RECOVERY);
    final String urlProperty = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(operationContext, dataSourceNode, URL_PROPERTY);
    Recovery recovery = null;
    // recoverySecurityDomain is provably null past the throw above, so the former
    // "|| recoverySecurityDomain != null" clauses were dead code and have been removed.
    final boolean hasRecoveryCredential = recoveryUsername != null && (recoveryPassword != null || recoveryCredentialSourceSupplier != null);
    if (hasRecoveryCredential || noRecovery != null) {
        Credential credential = null;
        if (hasRecoveryCredential) {
            credential = new CredentialImpl(recoveryUsername, recoveryPassword, recoveryAuthenticationContext, recoveryCredentialSourceSupplier);
        }
        Extension recoverPlugin = ModelNodeUtil.extractExtension(operationContext, dataSourceNode, RECOVER_PLUGIN_CLASSNAME, RECOVER_PLUGIN_PROPERTIES);
        if (noRecovery == null) {
            // Recovery credentials without an explicit flag imply recovery is enabled.
            noRecovery = Boolean.FALSE;
        }
        recovery = new Recovery(credential, recoverPlugin, noRecovery);
    }
    return new ModifiableXaDataSource(transactionIsolation, timeOut, security, statement, validation, urlDelimiter, urlProperty, urlSelectorStrategyClassName, useJavaContext, poolName, enabled, jndiName, spy, useCcm, connectable, tracking, mcp, enlistmentTrace, xaDataSourceProperty, xaDataSourceClass, module, newConnectionSql, xaPool, recovery);
}
272922.5629107wildfly
/**
 * Builds JNDI binding configurations for the ejb-ref and (when the environment is a
 * full {@code Environment} and this is not an appclient deployment) ejb-local-ref
 * entries of a deployment descriptor environment.
 *
 * For each reference: the view type is loaded from the declared home interface if
 * present, otherwise from the remote/local interface; the binding name is prefixed
 * with the environment's default context unless it is already a "java:" name; an
 * explicit lookup/mapped name wins over a link, and a link wins over a plain
 * interface-only reference. Every {@link EjbInjectionSource} created here is also
 * attached to the deployment unit under EJB_INJECTIONS for later resolution.
 *
 * @return the list of binding configurations for all processed references
 * @throws DeploymentUnitProcessingException if a view class cannot be loaded or the
 *         reference's interface type cannot be determined
 */
protected List<BindingConfiguration> processDescriptorEntries(DeploymentUnit deploymentUnit, DeploymentDescriptorEnvironment environment, ResourceInjectionTarget resourceInjectionTarget, final ComponentDescription componentDescription, ClassLoader classLoader, DeploymentReflectionIndex deploymentReflectionIndex, final EEApplicationClasses applicationClasses) throws DeploymentUnitProcessingException {
    final RemoteEnvironment remoteEnvironment = environment.getEnvironment();
    List<BindingConfiguration> bindingDescriptions = new ArrayList<BindingConfiguration>();
    // --- ejb-ref (remote) entries ---
    EJBReferencesMetaData ejbRefs = remoteEnvironment.getEjbReferences();
    if (ejbRefs != null) {
        for (EJBReferenceMetaData ejbRef : ejbRefs) {
            String name = ejbRef.getEjbRefName();
            String ejbName = ejbRef.getLink();
            // lookup-name takes precedence over mapped-name.
            String lookup = ejbRef.getLookupName() != null ? ejbRef.getLookupName() : ejbRef.getMappedName();
            String remoteInterface = ejbRef.getRemote();
            String home = ejbRef.getHome();
            Class<?> remoteInterfaceType = null;
            // The home interface, when declared, determines the view type; otherwise
            // fall back to the remote interface.
            if (!isEmpty(home)) {
                try {
                    remoteInterfaceType = ClassLoadingUtils.loadClass(home, deploymentUnit);
                } catch (ClassNotFoundException e) {
                    throw EjbLogger.ROOT_LOGGER.failedToLoadViewClass(e, home);
                }
            } else if (!isEmpty(remoteInterface)) {
                try {
                    remoteInterfaceType = ClassLoadingUtils.loadClass(remoteInterface, deploymentUnit);
                } catch (ClassNotFoundException e) {
                    throw EjbLogger.ROOT_LOGGER.failedToLoadViewClass(e, remoteInterface);
                }
            }
            // Qualify relative names with the environment's default JNDI context.
            if (!name.startsWith("java:")) {
                name = environment.getDefaultContext() + name;
            }
            LookupInjectionSource injectionSource = new LookupInjectionSource(name);
            // May refine the view type from the injection target's declared type.
            remoteInterfaceType = processInjectionTargets(resourceInjectionTarget, injectionSource, classLoader, deploymentReflectionIndex, ejbRef, remoteInterfaceType);
            final BindingConfiguration bindingConfiguration;
            EjbInjectionSource ejbInjectionSource = null;
            if (!isEmpty(lookup)) {
                // Non-"java:" lookup strings go through the EJB-specific lookup source.
                // NOTE(review): unlike the local-ref path below, a missing view type is
                // tolerated here when a lookup is present — confirm that is intended.
                if (!lookup.startsWith("java:")) {
                    bindingConfiguration = new BindingConfiguration(name, new EjbLookupInjectionSource(lookup, remoteInterfaceType));
                } else {
                    bindingConfiguration = new BindingConfiguration(name, new LookupInjectionSource(lookup));
                }
            } else {
                if (remoteInterfaceType == null) {
                    throw EjbLogger.ROOT_LOGGER.couldNotDetermineEjbRefForInjectionTarget(name, resourceInjectionTarget);
                }
                if (!isEmpty(ejbName)) {
                    bindingConfiguration = new BindingConfiguration(name, ejbInjectionSource = new EjbInjectionSource(ejbName, remoteInterfaceType.getName(), name, deploymentUnit, appclient));
                } else {
                    bindingConfiguration = new BindingConfiguration(name, ejbInjectionSource = new EjbInjectionSource(remoteInterfaceType.getName(), name, deploymentUnit, appclient));
                }
            }
            if (ejbInjectionSource != null) {
                deploymentUnit.addToAttachmentList(EjbDeploymentAttachmentKeys.EJB_INJECTIONS, ejbInjectionSource);
            }
            bindingDescriptions.add(bindingConfiguration);
        }
    }
    // --- ejb-local-ref entries: only available on a full Environment, and local
    // views are never resolved in appclient mode ---
    if (remoteEnvironment instanceof Environment && !appclient) {
        EJBLocalReferencesMetaData ejbLocalRefs = ((Environment) remoteEnvironment).getEjbLocalReferences();
        if (ejbLocalRefs != null) {
            for (EJBLocalReferenceMetaData ejbRef : ejbLocalRefs) {
                String name = ejbRef.getEjbRefName();
                String ejbName = ejbRef.getLink();
                String lookup = ejbRef.getLookupName() != null ? ejbRef.getLookupName() : ejbRef.getMappedName();
                String localInterface = ejbRef.getLocal();
                String localHome = ejbRef.getLocalHome();
                Class<?> localInterfaceType = null;
                if (!isEmpty(localHome)) {
                    try {
                        localInterfaceType = ClassLoadingUtils.loadClass(localHome, deploymentUnit);
                    } catch (ClassNotFoundException e) {
                        throw EjbLogger.ROOT_LOGGER.failedToLoadViewClass(e, localHome);
                    }
                } else if (!isEmpty(localInterface)) {
                    try {
                        localInterfaceType = ClassLoadingUtils.loadClass(localInterface, deploymentUnit);
                    } catch (ClassNotFoundException e) {
                        throw EjbLogger.ROOT_LOGGER.failedToLoadViewClass(e, localInterface);
                    }
                }
                if (!name.startsWith("java:")) {
                    name = environment.getDefaultContext() + name;
                }
                LookupInjectionSource injectionSource = new LookupInjectionSource(name);
                localInterfaceType = processInjectionTargets(resourceInjectionTarget, injectionSource, classLoader, deploymentReflectionIndex, ejbRef, localInterfaceType);
                // Unlike the remote path, the local view type is required up front,
                // even when an explicit lookup is provided.
                if (localInterfaceType == null) {
                    throw EjbLogger.ROOT_LOGGER.couldNotDetermineEjbLocalRefForInjectionTarget(name, resourceInjectionTarget);
                }
                final BindingConfiguration bindingConfiguration;
                EjbInjectionSource ejbInjectionSource = null;
                if (!isEmpty(lookup)) {
                    if (!lookup.startsWith("java:")) {
                        bindingConfiguration = new BindingConfiguration(name, new EjbLookupInjectionSource(lookup, localInterfaceType));
                    } else {
                        bindingConfiguration = new BindingConfiguration(name, new LookupInjectionSource(lookup));
                    }
                } else if (!isEmpty(ejbName)) {
                    bindingConfiguration = new BindingConfiguration(name, ejbInjectionSource = new EjbInjectionSource(ejbName, localInterfaceType.getName(), name, deploymentUnit, appclient));
                } else {
                    bindingConfiguration = new BindingConfiguration(name, ejbInjectionSource = new EjbInjectionSource(localInterfaceType.getName(), name, deploymentUnit, appclient));
                }
                if (ejbInjectionSource != null) {
                    deploymentUnit.addToAttachmentList(EjbDeploymentAttachmentKeys.EJB_INJECTIONS, ejbInjectionSource);
                }
                bindingDescriptions.add(bindingConfiguration);
            }
        }
    }
    return bindingDescriptions;
}
273083.0527106wildfly
/**
 * CORBA skeleton entry point: dispatches an incoming IIOP request on this EJBObject/EJBHome.
 * <p>
 * Looks up the {@link SkeletonStrategy} registered for {@code opName}, unmarshals the
 * parameters from {@code in}, invokes the target (either a locally-served pseudo-operation
 * such as {@code _get_handle}, or the component view), and marshals either the return value
 * or the thrown exception into a reply stream obtained from {@code handler}.
 *
 * @param opName  IDL operation name of the request
 * @param in      CORBA input stream holding the marshalled parameters
 * @param handler response handler used to create the (normal or exception) reply stream
 * @return the reply output stream to be sent back to the client
 * @throws BAD_OPERATION if no skeleton strategy is registered for {@code opName}
 */
public OutputStream _invoke(final String opName, final InputStream in, final ResponseHandler handler) {
    EjbLogger.ROOT_LOGGER.tracef("EJBObject invocation: %s", opName);
    // Resolve the per-operation marshalling/invocation strategy; unknown names are a protocol error.
    SkeletonStrategy op = methodInvokerMap.get(opName);
    if (op == null) {
        EjbLogger.ROOT_LOGGER.debugf("Unable to find opname '%s' valid operations:%s", opName, methodInvokerMap.keySet());
        throw new BAD_OPERATION(opName);
    }
    // Establish the component's naming context and classloader for the duration of the call;
    // both are restored in the finally block below (pop must mirror push).
    final NamespaceContextSelector selector = componentView.getComponent().getNamespaceContextSelector();
    final ClassLoader oldCl = WildFlySecurityManager.getCurrentContextClassLoaderPrivileged();
    NamespaceContextSelector.pushCurrentSelector(selector);
    try {
        WildFlySecurityManager.setCurrentContextClassLoaderPrivileged(classLoader);
        org.omg.CORBA_2_3.portable.OutputStream out;
        try {
            Object retVal;
            // Pseudo-operations answered locally without touching the component view.
            if (!home && opName.equals("_get_handle")) {
                retVal = new HandleImplIIOP(orb.object_to_string(_this_object()));
            } else if (home && opName.equals("_get_homeHandle")) {
                retVal = homeHandle;
            } else if (home && opName.equals("_get_EJBMetaData")) {
                retVal = ejbMetaData;
            } else {
                // Extract caller identity material from the CSIv2 SAS context, if present.
                // identityPrincipal comes from the asserted (run-as) identity; principal/credential
                // from a username/password pair. Any "@domain" suffix is stripped from both.
                Principal identityPrincipal = null;
                Principal principal = null;
                Object credential = null;
                if (this.sasCurrent != null) {
                    final byte[] incomingIdentity = this.sasCurrent.get_incoming_principal_name();
                    if (incomingIdentity != null && incomingIdentity.length > 0) {
                        String name = new String(incomingIdentity, StandardCharsets.UTF_8);
                        int domainIndex = name.indexOf('@');
                        if (domainIndex > 0)
                            name = name.substring(0, domainIndex);
                        identityPrincipal = new NamePrincipal(name);
                    }
                    final byte[] incomingUsername = this.sasCurrent.get_incoming_username();
                    if (incomingUsername != null && incomingUsername.length > 0) {
                        final byte[] incomingPassword = this.sasCurrent.get_incoming_password();
                        String name = new String(incomingUsername, StandardCharsets.UTF_8);
                        int domainIndex = name.indexOf('@');
                        if (domainIndex > 0) {
                            name = name.substring(0, domainIndex);
                        }
                        principal = new NamePrincipal(name);
                        credential = new String(incomingPassword, StandardCharsets.UTF_8).toCharArray();
                    }
                }
                // Unmarshal the operation's parameters only after identity extraction.
                final Object[] params = op.readParams((org.omg.CORBA_2_3.portable.InputStream) in);
                if (!this.home && opName.equals("isIdentical") && params.length == 1) {
                    // isIdentical is answered locally by comparing object references.
                    Object val = params[0];
                    retVal = val instanceof org.omg.CORBA.Object && handleIsIdentical((org.omg.CORBA.Object) val);
                } else {
                    if (this.securityDomain != null) {
                        // Build the SecurityIdentity to run the invocation under:
                        //  - asserted identity + credentials: authenticate, then run-as the asserted identity
                        //  - asserted identity only: anonymous identity running-as the asserted one
                        //  - credentials only: the authenticated identity itself
                        //  - neither: anonymous
                        SecurityIdentity identity = this.securityDomain.getAnonymousSecurityIdentity();
                        AuthenticationConfiguration authenticationConfiguration = AuthenticationConfiguration.empty();
                        if (identityPrincipal != null) {
                            if (principal != null) {
                                char[] password = (char[]) credential;
                                authenticationConfiguration = authenticationConfiguration.useName(principal.getName()).usePassword(password);
                                SecurityIdentity authenticatedIdentity = this.authenticate(principal, password);
                                identity = authenticatedIdentity.createRunAsIdentity(identityPrincipal.getName(), true);
                            } else {
                                identity = this.securityDomain.getAnonymousSecurityIdentity().createRunAsIdentity(identityPrincipal.getName(), true);
                            }
                        } else if (principal != null) {
                            char[] password = (char[]) credential;
                            authenticationConfiguration = authenticationConfiguration.useName(principal.getName()).usePassword(password);
                            identity = this.authenticate(principal, password);
                        }
                        final InterceptorContext interceptorContext = new InterceptorContext();
                        this.prepareInterceptorContext(op, params, interceptorContext);
                        try {
                            // Propagate the credentials for any outbound "iiop" calls made during the invocation,
                            // and run the component invocation under the computed identity.
                            final AuthenticationContext context = AuthenticationContext.captureCurrent().with(MatchRule.ALL.matchProtocol("iiop"), authenticationConfiguration);
                            retVal = identity.runAs((PrivilegedExceptionAction<Object>) () -> context.run((PrivilegedExceptionAction<Object>) () -> this.componentView.invoke(interceptorContext)));
                        } catch (PrivilegedActionException e) {
                            // Unwrap so the real application/system exception reaches the marshalling code below.
                            throw e.getCause();
                        }
                    } else {
                        // No security domain configured: invoke the component view directly.
                        final InterceptorContext interceptorContext = new InterceptorContext();
                        prepareInterceptorContext(op, params, interceptorContext);
                        retVal = this.componentView.invoke(interceptorContext);
                    }
                }
            }
            // Marshal the normal reply; void operations write no return value.
            out = (org.omg.CORBA_2_3.portable.OutputStream) handler.createReply();
            if (op.isNonVoid()) {
                op.writeRetval(out, retVal);
            }
        } catch (Throwable e) {
            EjbLogger.ROOT_LOGGER.trace("Exception in EJBObject invocation", e);
            if (e instanceof MBeanException) {
                e = ((MBeanException) e).getTargetException();
            }
            // CORBA system exceptions are rethrown as-is; anything else becomes an exception reply.
            RmiIdlUtil.rethrowIfCorbaSystemException(e);
            out = (org.omg.CORBA_2_3.portable.OutputStream) handler.createExceptionReply();
            op.writeException(out, e);
        }
        return out;
    } finally {
        // Restore naming context and classloader regardless of outcome.
        NamespaceContextSelector.popCurrentSelector();
        WildFlySecurityManager.setCurrentContextClassLoaderPrivileged(oldCl);
    }
}
272019.4231115wildfly
/**
 * Parses a single {@code <bean>} element (attributes plus all child elements) into a
 * {@link BeanMetaDataConfig}.
 * <p>
 * Recognized attributes: {@code name} (required), {@code class}, {@code mode}. Child
 * collections (aliases, properties, installs, uninstalls, callbacks, depends) are created
 * lazily on first occurrence. The reader is consumed up to and including the bean's end tag.
 *
 * @param reader the stream reader positioned on the bean's start element
 * @return the populated bean configuration
 * @throws XMLStreamException on unexpected attributes/elements, a missing required
 *         attribute, or premature end of the stream
 */
private BeanMetaDataConfig parseBean(final XMLExtendedStreamReader reader) throws XMLStreamException {
    BeanMetaDataConfig beanConfig = new BeanMetaDataConfig();
    final int count = reader.getAttributeCount();
    // Attributes still outstanding; NAME is mandatory and is removed once seen.
    final Set<Attribute> required = EnumSet.of(Attribute.NAME);
    for (int i = 0; i < count; i++) {
        final Attribute attribute = Attribute.of(reader.getAttributeLocalName(i));
        required.remove(attribute);
        final String attributeValue = reader.getAttributeValue(i);
        switch(attribute) {
            case NAME:
                beanConfig.setName(attributeValue);
                break;
            case CLASS:
                beanConfig.setBeanClass(attributeValue);
                break;
            case MODE:
                beanConfig.setMode(ModeConfig.of(attributeValue));
                break;
            default:
                throw unexpectedAttribute(reader, i);
        }
    }
    if (!required.isEmpty()) {
        throw missingRequired(reader, required);
    }
    // Walk child elements until this bean's end tag; END_ELEMENT is the only normal exit.
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case COMMENT:
                break;
            case END_ELEMENT:
                return beanConfig;
            case START_ELEMENT:
                switch(Element.of(reader.getLocalName())) {
                    case ALIAS:
                        Set<String> aliases = beanConfig.getAliases();
                        if (aliases == null) {
                            // Lazily created: most beans declare no aliases.
                            aliases = new HashSet<String>();
                            beanConfig.setAliases(aliases);
                        }
                        aliases.add(parseAlias(reader));
                        break;
                    case CLASSLOADER:
                        beanConfig.setModule(parseModuleConfig(reader));
                        break;
                    case CONSTRUCTOR:
                        beanConfig.setConstructor(parseConstructor(reader));
                        break;
                    case PROPERTY:
                        Set<PropertyConfig> properties = beanConfig.getProperties();
                        if (properties == null) {
                            properties = new HashSet<PropertyConfig>();
                            beanConfig.setProperties(properties);
                        }
                        properties.add(parseProperty(reader));
                        break;
                    case INSTALL:
                        List<InstallConfig> installs = beanConfig.getInstalls();
                        if (installs == null) {
                            installs = new ArrayList<InstallConfig>();
                            beanConfig.setInstalls(installs);
                        }
                        installs.add(parseInstall(reader));
                        break;
                    case UNINSTALL:
                        // Uninstalls share InstallConfig and the same element shape as installs.
                        List<InstallConfig> uninstalls = beanConfig.getUninstalls();
                        if (uninstalls == null) {
                            uninstalls = new ArrayList<InstallConfig>();
                            beanConfig.setUninstalls(uninstalls);
                        }
                        uninstalls.add(parseInstall(reader));
                        break;
                    case INCALLBACK:
                        List<CallbackConfig> incallbacks = beanConfig.getIncallbacks();
                        if (incallbacks == null) {
                            incallbacks = new ArrayList<CallbackConfig>();
                            beanConfig.setIncallbacks(incallbacks);
                        }
                        incallbacks.add(parseCallback(reader));
                        break;
                    case UNCALLBACK:
                        List<CallbackConfig> uncallbacks = beanConfig.getUncallbacks();
                        if (uncallbacks == null) {
                            uncallbacks = new ArrayList<CallbackConfig>();
                            beanConfig.setUncallbacks(uncallbacks);
                        }
                        uncallbacks.add(parseCallback(reader));
                        break;
                    case DEPENDS:
                        Set<DependsConfig> depends = beanConfig.getDepends();
                        if (depends == null) {
                            depends = new HashSet<DependsConfig>();
                            beanConfig.setDepends(depends);
                        }
                        depends.add(parseDepends(reader));
                        break;
                    case CREATE:
                        beanConfig.setCreate(parseLifecycle(reader, "create"));
                        break;
                    case START:
                        beanConfig.setStart(parseLifecycle(reader, "start"));
                        break;
                    case STOP:
                        beanConfig.setStop(parseLifecycle(reader, "stop"));
                        break;
                    case DESTROY:
                        beanConfig.setDestroy(parseLifecycle(reader, "destroy"));
                        break;
                    case UNKNOWN:
                        throw unexpectedElement(reader);
                }
                break;
        }
    }
    // Stream ended before the bean's end tag was seen.
    throw unexpectedElement(reader);
}
273699.781155wildfly
/**
 * Verifies singleton-service election across node restarts.
 * <p>
 * The default service should always elect a provider when at least one node is up,
 * while the quorum service requires two live nodes and reports no provider (no
 * node header) when only one node remains. The scenario stops/starts each node in
 * turn and checks the elected provider via both deployments' servlets.
 *
 * @param baseURL1 servlet base URL of deployment 1 (node 1)
 * @param baseURL2 servlet base URL of deployment 2 (node 2)
 */
public void testSingletonService(@ArquillianResource(NodeServiceServlet.class) @OperateOnDeployment(DEPLOYMENT_1) URL baseURL1, @ArquillianResource(NodeServiceServlet.class) @OperateOnDeployment(DEPLOYMENT_2) URL baseURL2) throws IOException, URISyntaxException {
    stop(NODE_2);
    try (CloseableHttpClient client = TestHttpClientUtils.promiscuousCookieHttpClient()) {
        // Only node 1 up: default service elects node 1, quorum service has no provider.
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_1), NODE_1);
        assertNoProvider(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.QUORUM_SERVICE_NAME));
        start(NODE_2);
        // Node 2 rejoins: both services elect node 2, visible from either deployment.
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_1), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.QUORUM_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.QUORUM_SERVICE_NAME, NODE_2), NODE_2);
        stop(NODE_2);
        // Back to a single node: default fails over to node 1, quorum loses its provider.
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_1), NODE_1);
        assertNoProvider(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.QUORUM_SERVICE_NAME));
        start(NODE_2);
        // Node 2 back again: node 2 is re-elected for both services.
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.QUORUM_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.QUORUM_SERVICE_NAME, NODE_2), NODE_2);
        stop(NODE_1);
        // Only node 2 up: default stays on node 2, quorum again has no provider.
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_2), NODE_2);
        assertNoProvider(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.QUORUM_SERVICE_NAME));
        start(NODE_1);
        // Node 1 rejoins: node 2 remains the provider for both services.
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL1, NodeServiceActivator.QUORUM_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.DEFAULT_SERVICE_NAME, NODE_2), NODE_2);
        assertProviderIs(client, NodeServiceServlet.createURI(baseURL2, NodeServiceActivator.QUORUM_SERVICE_NAME, NODE_2), NODE_2);
    }
}

/**
 * Issues a GET against {@code uri} and asserts a 200 response whose node header
 * names {@code expectedNode} as the elected singleton provider.
 */
private static void assertProviderIs(CloseableHttpClient client, java.net.URI uri, String expectedNode) throws IOException {
    HttpResponse response = client.execute(new HttpGet(uri));
    try {
        Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        Assert.assertTrue(response.containsHeader(NodeServiceServlet.NODE_HEADER));
        Assert.assertEquals(expectedNode, response.getFirstHeader(NodeServiceServlet.NODE_HEADER).getValue());
    } finally {
        HttpClientUtils.closeQuietly(response);
    }
}

/**
 * Issues a GET against {@code uri} and asserts a 200 response with no node header,
 * i.e. the service currently has no elected provider.
 */
private static void assertNoProvider(CloseableHttpClient client, java.net.URI uri) throws IOException {
    HttpResponse response = client.execute(new HttpGet(uri));
    try {
        Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        Assert.assertFalse(response.containsHeader(NodeServiceServlet.NODE_HEADER));
    } finally {
        HttpClientUtils.closeQuietly(response);
    }
}
282832.5521109cassandra
/**
 * Prints read/write latency, SSTables-per-read, partition-size and cell-count
 * histograms for the requested table(s).
 * <p>
 * Accepts either {@code <keyspace> <table>}, a single {@code <keyspace.table>}
 * argument, or no arguments (all tables). Unknown tables are rejected up front.
 *
 * @param probe JMX connection to the node
 * @throws IllegalArgumentException on a malformed argument list or an unknown table
 */
public void execute(NodeProbe probe) {
    PrintStream out = probe.output().out;
    Multimap<String, String> tablesList = HashMultimap.create();
    Multimap<String, String> allTables = HashMultimap.create();
    // Collect every known keyspace -> table pair for argument validation below.
    Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> tableMBeans = probe.getColumnFamilyStoreMBeanProxies();
    while (tableMBeans.hasNext()) {
        Map.Entry<String, ColumnFamilyStoreMBean> entry = tableMBeans.next();
        allTables.put(entry.getKey(), entry.getValue().getTableName());
    }
    // Argument forms: "<ks> <tb>" (neither dotted), "<ks.tb>", or none (= all tables).
    if (args.size() == 2 && args.stream().noneMatch(arg -> arg.contains("."))) {
        tablesList.put(args.get(0), args.get(1));
    } else if (args.size() == 1) {
        Pair<String, String> ksTbPair = parseTheKsTbPair(args.get(0));
        tablesList.put(ksTbPair.left, ksTbPair.right);
    } else if (args.size() == 0) {
        tablesList = allTables;
    } else {
        throw new IllegalArgumentException("tablehistograms requires <keyspace> <table> or <keyspace.table> format argument.");
    }
    // Validate every requested pair before printing anything.
    for (String keyspace : tablesList.keys()) {
        for (String table : tablesList.get(keyspace)) {
            if (!allTables.containsEntry(keyspace, table))
                throw new IllegalArgumentException("Unknown table " + keyspace + '.' + table);
        }
    }
    for (String keyspace : tablesList.keys().elementSet()) {
        for (String table : tablesList.get(keyspace)) {
            long[] estimatedPartitionSize = (long[]) probe.getColumnFamilyMetric(keyspace, table, "EstimatedPartitionSizeHistogram");
            long[] estimatedColumnCount = (long[]) probe.getColumnFamilyMetric(keyspace, table, "EstimatedColumnCountHistogram");
            // 7 slots: indices 0-4 hold the five percentiles below, 5 = min, 6 = max.
            double[] estimatedRowSizePercentiles = new double[7];
            double[] estimatedColumnCountPercentiles = new double[7];
            double[] offsetPercentiles = new double[] { 0.5, 0.75, 0.95, 0.98, 0.99 };
            if (ArrayUtils.isEmpty(estimatedPartitionSize) || ArrayUtils.isEmpty(estimatedColumnCount)) {
                // No data at all: every slot (including min/max) is NaN.
                out.println("No SSTables exists, unable to calculate 'Partition Size' and 'Cell Count' percentiles");
                for (int i = 0; i < 7; i++) {
                    estimatedRowSizePercentiles[i] = Double.NaN;
                    estimatedColumnCountPercentiles[i] = Double.NaN;
                }
            } else {
                EstimatedHistogram partitionSizeHist = new EstimatedHistogram(estimatedPartitionSize);
                EstimatedHistogram columnCountHist = new EstimatedHistogram(estimatedColumnCount);
                if (partitionSizeHist.isOverflowed()) {
                    out.println(String.format("Row sizes are larger than %s, unable to calculate percentiles", partitionSizeHist.getLargestBucketOffset()));
                    // NOTE(review): this loop NaN-fills only indices 0-4 while the column-count
                    // branch below fills 0-6; harmless either way, since slots 5/6 are
                    // overwritten with min/max after these branches.
                    for (int i = 0; i < offsetPercentiles.length; i++) estimatedRowSizePercentiles[i] = Double.NaN;
                } else {
                    for (int i = 0; i < offsetPercentiles.length; i++) estimatedRowSizePercentiles[i] = partitionSizeHist.percentile(offsetPercentiles[i]);
                }
                if (columnCountHist.isOverflowed()) {
                    out.println(String.format("Column counts are larger than %s, unable to calculate percentiles", columnCountHist.getLargestBucketOffset()));
                    for (int i = 0; i < estimatedColumnCountPercentiles.length; i++) estimatedColumnCountPercentiles[i] = Double.NaN;
                } else {
                    for (int i = 0; i < offsetPercentiles.length; i++) estimatedColumnCountPercentiles[i] = columnCountHist.percentile(offsetPercentiles[i]);
                }
                // Min/max always come from the histograms, even when percentiles overflowed.
                estimatedRowSizePercentiles[5] = partitionSizeHist.min();
                estimatedColumnCountPercentiles[5] = columnCountHist.min();
                estimatedRowSizePercentiles[6] = partitionSizeHist.max();
                estimatedColumnCountPercentiles[6] = columnCountHist.max();
            }
            // Row labels aligned with the 7 slots filled above.
            String[] percentiles = new String[] { "50%", "75%", "95%", "98%", "99%", "Min", "Max" };
            Double[] readLatency = probe.metricPercentilesAsArray((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspace, table, "ReadLatency"));
            Double[] writeLatency = probe.metricPercentilesAsArray((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspace, table, "WriteLatency"));
            Double[] sstablesPerRead = probe.metricPercentilesAsArray((CassandraMetricsRegistry.JmxHistogramMBean) probe.getColumnFamilyMetric(keyspace, table, "SSTablesPerReadHistogram"));
            out.println(format("%s/%s histograms", keyspace, table));
            out.println(format("%-10s%18s%18s%18s%18s%18s", "Percentile", "Read Latency", "Write Latency", "SSTables", "Partition Size", "Cell Count"));
            out.println(format("%-10s%18s%18s%18s%18s%18s", "", "(micros)", "(micros)", "", "(bytes)", ""));
            for (int i = 0; i < percentiles.length; i++) {
                out.println(format("%-10s%18.2f%18.2f%18.2f%18.0f%18.0f", percentiles[i], readLatency[i], writeLatency[i], sstablesPerRead[i], estimatedRowSizePercentiles[i], estimatedColumnCountPercentiles[i]));
            }
            out.println();
        }
    }
}
282800.9518111cassandra
/**
 * Command-line entry point: dumps the contents of a single SSTable.
 * <p>
 * Exactly one SSTable path is required. Supported modes: key enumeration
 * ({@code -e}), debug line output ({@code -d}), JSON-lines, or full JSON
 * (default). {@code -k} selects partition keys, {@code -x} excludes them.
 * Exits with status 1 on usage errors or a missing file, 0 otherwise.
 *
 * @param args command-line arguments (options plus one SSTable path)
 * @throws ConfigurationException if schema/metadata loading fails
 */
public static void main(String[] args) throws ConfigurationException {
    // NOTE(review): PosixParser is deprecated in newer commons-cli; kept as-is here.
    CommandLineParser parser = new PosixParser();
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e1) {
        System.err.println(e1.getMessage());
        printUsage();
        System.exit(1);
    }
    String[] keys = cmd.getOptionValues(KEY_OPTION);
    // Excluded keys; an absent -x option becomes an empty set rather than null.
    HashSet<String> excludes = new HashSet<>(Arrays.asList(cmd.getOptionValues(EXCLUDE_KEY_OPTION) == null ? new String[0] : cmd.getOptionValues(EXCLUDE_KEY_OPTION)));
    if (cmd.getArgs().length != 1) {
        String msg = "You must supply exactly one sstable";
        // A trailing sstable path after -k/-x is swallowed as an option value; hint at that.
        if (cmd.getArgs().length == 0 && (keys != null && keys.length > 0 || !excludes.isEmpty()))
            msg += ", which should be before the -k/-x options so it's not interpreted as a partition key.";
        System.err.println(msg);
        printUsage();
        System.exit(1);
    }
    // NOTE(review): this is Cassandra's own File wrapper (has absolutePath()), not java.io.File.
    File ssTableFile = new File(cmd.getArgs()[0]);
    if (!ssTableFile.exists()) {
        System.err.println("Cannot find file " + ssTableFile.absolutePath());
        System.exit(1);
    }
    Descriptor desc = Descriptor.fromFileWithComponent(ssTableFile, false).left;
    try {
        // Metadata is reconstructed from the sstable itself; no live schema is needed.
        TableMetadata metadata = Util.metadataFromSSTable(desc);
        SSTableReader sstable = SSTableReader.openNoValidation(null, desc, TableMetadataRef.forOfflineTools(metadata));
        if (cmd.hasOption(ENUMERATE_KEYS_OPTION)) {
            // Keys-only mode: stream partition keys as JSON without reading row data.
            try (KeyIterator iter = sstable.keyIterator()) {
                JsonTransformer.keysToJson(null, Util.iterToStream(iter), cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        } else {
            IPartitioner partitioner = sstable.getPartitioner();
            final ISSTableScanner currentScanner;
            if ((keys != null) && (keys.length > 0)) {
                // Restrict the scan to the requested keys (minus any excluded ones),
                // converted to sorted token bounds.
                List<AbstractBounds<PartitionPosition>> bounds = Arrays.stream(keys).filter(key -> !excludes.contains(key)).map(metadata.partitionKeyType::fromString).map(partitioner::decorateKey).sorted().map(DecoratedKey::getToken).map(token -> new Bounds<>(token.minKeyBound(), token.maxKeyBound())).collect(Collectors.toList());
                currentScanner = sstable.getScanner(bounds.iterator());
            } else {
                currentScanner = sstable.getScanner();
            }
            // Excluded keys are filtered here even for full scans.
            Stream<UnfilteredRowIterator> partitions = Util.iterToStream(currentScanner).filter(i -> excludes.isEmpty() || !excludes.contains(metadata.partitionKeyType.getString(i.partitionKey().getKey())));
            if (cmd.hasOption(DEBUG_OUTPUT_OPTION)) {
                // Debug mode: one line per deletion/static row/row, prefixed with
                // "[key]@position" using the scanner's current byte position.
                AtomicLong position = new AtomicLong();
                partitions.forEach(partition -> {
                    position.set(currentScanner.getCurrentPosition());
                    if (!partition.partitionLevelDeletion().isLive()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.partitionLevelDeletion());
                    }
                    if (!partition.staticRow().isEmpty()) {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + partition.staticRow().toString(metadata, true));
                    }
                    partition.forEachRemaining(row -> {
                        System.out.println("[" + metadata.partitionKeyType.getString(partition.partitionKey().getKey()) + "]@" + position.get() + " " + row.toString(metadata, false, true));
                        position.set(currentScanner.getCurrentPosition());
                    });
                });
            } else if (cmd.hasOption(PARTITION_JSON_LINES)) {
                JsonTransformer.toJsonLines(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            } else {
                JsonTransformer.toJson(currentScanner, partitions, cmd.hasOption(RAW_TIMESTAMPS), metadata, System.out);
            }
        }
    } catch (IOException e) {
        e.printStackTrace(System.err);
    }
    System.exit(0);
}
282318.821115cassandra
/**
 * Parses a single console input line into a native-protocol request message.
 * <p>
 * The first whitespace-delimited token selects the command: STARTUP (with optional
 * {@code snappy}/{@code lz4}/{@code throw_on_overload} options), QUERY &lt;cql&gt;
 * (with optional trailing {@code !pageSize}), PREPARE &lt;cql&gt;, EXECUTE
 * &lt;stmtIdHex&gt; &lt;resultMetadataIdHex&gt; [values...], OPTIONS, AUTHENTICATE
 * key=value..., REGISTER &lt;event type&gt;.
 * <p>
 * Side effects: STARTUP options mutate {@code connection} (compressor,
 * throw-on-overload) before the message is returned.
 *
 * @param line the raw console line
 * @return the parsed request, or {@code null} if the line is empty, malformed,
 *         or not a recognized command
 */
private Message.Request parseLine(String line) {
    Splitter splitter = Splitter.on(' ').trimResults().omitEmptyStrings();
    Iterator<String> iter = splitter.split(line).iterator();
    if (!iter.hasNext())
        return null;
    String msgType = iter.next().toUpperCase();
    if (msgType.equals("STARTUP")) {
        Map<String, String> options = new HashMap<String, String>();
        options.put(StartupMessage.CQL_VERSION, "3.0.0");
        while (iter.hasNext()) {
            String next = iter.next();
            // equalsIgnoreCase is locale-safe, unlike toLowerCase().equals(...)
            if (next.equalsIgnoreCase("snappy")) {
                options.put(StartupMessage.COMPRESSION, "snappy");
                connection.setCompressor(Compressor.SnappyCompressor.instance);
            }
            if (next.equalsIgnoreCase("lz4")) {
                options.put(StartupMessage.COMPRESSION, "lz4");
                connection.setCompressor(Compressor.LZ4Compressor.instance);
            }
            if (next.equalsIgnoreCase("throw_on_overload")) {
                options.put(StartupMessage.THROW_ON_OVERLOAD, "1");
                connection.setThrowOnOverload(true);
            }
        }
        return new StartupMessage(options);
    } else if (msgType.equals("QUERY")) {
        // Guard: a bare "QUERY" (5 chars) would make substring(6) throw.
        if (line.length() < 6)
            return null;
        line = line.substring(6);
        String query = line;
        int pageSize = -1;
        // An optional trailing " !<n>" sets the result page size.
        if (line.matches(".+ !\\d+$")) {
            int idx = line.lastIndexOf('!');
            query = line.substring(0, idx - 1);
            try {
                pageSize = Integer.parseInt(line.substring(idx + 1));
            } catch (NumberFormatException e) {
                return null;
            }
        }
        return new QueryMessage(query, QueryOptions.create(ConsistencyLevel.ONE, Collections.<ByteBuffer>emptyList(), false, pageSize, null, null, version, null));
    } else if (msgType.equals("PREPARE")) {
        // Guard: a bare "PREPARE" (7 chars) would make substring(8) throw.
        if (line.length() < 8)
            return null;
        String query = line.substring(8);
        return new PrepareMessage(query, null);
    } else if (msgType.equals("EXECUTE")) {
        try {
            byte[] preparedStatementId = Hex.hexToBytes(iter.next());
            byte[] resultMetadataId = Hex.hexToBytes(iter.next());
            // Remaining tokens are bind values: ints where parseable, else UTF-8 strings.
            List<ByteBuffer> values = new ArrayList<ByteBuffer>();
            while (iter.hasNext()) {
                String next = iter.next();
                ByteBuffer bb;
                try {
                    int v = Integer.parseInt(next);
                    bb = Int32Type.instance.decompose(v);
                } catch (NumberFormatException e) {
                    bb = UTF8Type.instance.decompose(next);
                }
                values.add(bb);
            }
            return new ExecuteMessage(MD5Digest.wrap(preparedStatementId), MD5Digest.wrap(resultMetadataId), QueryOptions.forInternalCalls(ConsistencyLevel.ONE, values));
        } catch (Exception e) {
            // Missing tokens or invalid hex ids: treat the whole line as malformed.
            return null;
        }
    } else if (msgType.equals("OPTIONS")) {
        return new OptionsMessage();
    } else if (msgType.equals("AUTHENTICATE")) {
        Map<String, String> credentials = readCredentials(iter);
        if (!credentials.containsKey(PasswordAuthenticator.USERNAME_KEY) || !credentials.containsKey(PasswordAuthenticator.PASSWORD_KEY)) {
            System.err.println("[ERROR] Authentication requires both 'username' and 'password'");
            return null;
        }
        return new AuthResponse(encodeCredentialsForSasl(credentials));
    } else if (msgType.equals("REGISTER")) {
        // Guard: a bare "REGISTER" (8 chars) would make substring(9) throw.
        if (line.length() < 9)
            return null;
        String type = line.substring(9).toUpperCase();
        try {
            return new RegisterMessage(Collections.singletonList(Enum.valueOf(Event.Type.class, type)));
        } catch (IllegalArgumentException e) {
            System.err.println("[ERROR] Unknown event type: " + type);
            return null;
        }
    }
    return null;
}
283656.544134cassandra
/**
 * Verifies keyspace-level authorization: a role holding only CREATE on the
 * keyspace is denied all DML/DDL on its objects; after GRANT
 * ALTER/DROP/SELECT/MODIFY those operations succeed (including on objects
 * created afterwards); after REVOKE they are denied again.
 * Permission changes propagate asynchronously, so each phase transition is
 * awaited with Util.spinAssertEquals before asserting.
 */
public void testGrantedKeyspace() throws Throwable {
    // Phase 1 (superuser): create a login role with only CREATE on the keyspace,
    // plus the schema objects the permission checks will target.
    useSuperUser();
    executeNet(String.format("CREATE ROLE %s WITH LOGIN = TRUE AND password='%s'", user, pass));
    executeNet("GRANT CREATE ON KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
    String table = KEYSPACE_PER_TEST + '.' + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
    String index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
    String type = KEYSPACE_PER_TEST + '.' + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
    String mv = KEYSPACE_PER_TEST + ".ks_mv_01";
    executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
    // Phase 2 (restricted user): wait until the role/permission state has
    // propagated (the INSERT starts being rejected), then assert every
    // DML/DDL statement is unauthorized.
    useUser(user, pass);
    final String spinAssertTable = table;
    Util.spinAssertEquals(false, () -> {
        try {
            assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
        } catch (Throwable e) {
            return true;
        }
        return false;
    }, 10);
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1"));
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
    assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
    assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "ALTER TYPE " + type + " ADD c bigint");
    assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "DROP TYPE " + type);
    // Dropping an MV or index requires ALTER on the base table, hence the ALTER messages.
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP MATERIALIZED VIEW " + mv);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP INDEX " + index);
    // Phase 3 (superuser): grant the remaining keyspace permissions.
    useSuperUser();
    executeNet("GRANT ALTER, DROP, SELECT, MODIFY ON KEYSPACE " + KEYSPACE_PER_TEST + " TO " + user);
    // Phase 4 (restricted user): wait until the grants take effect (ALTER
    // KEYSPACE stops failing), then verify all operations now succeed.
    useUser(user, pass);
    Util.spinAssertEquals(false, () -> {
        try {
            executeNet("ALTER KEYSPACE " + KEYSPACE_PER_TEST + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}");
        } catch (Throwable e) {
            return true;
        }
        return false;
    }, 10);
    executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
    assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
    assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
    executeNet("DROP MATERIALIZED VIEW " + mv);
    executeNet("DROP INDEX " + index);
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
    executeNet("ALTER TYPE " + type + " ADD c bigint");
    executeNet("DROP TYPE " + type);
    // Objects created AFTER the grant must also be covered by the
    // keyspace-level permissions.
    table = createTableName();
    type = KEYSPACE_PER_TEST + "." + createTypeName();
    mv = KEYSPACE_PER_TEST + ".ks_mv_02";
    executeNet("CREATE TYPE " + type + " (a int, b text)");
    executeNet(formatQuery(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))"));
    executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
    executeNet(formatQuery(KEYSPACE_PER_TEST, "INSERT INTO %s (pk, ck, val, val_2) VALUES (1, 1, 1, '1')"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "UPDATE %s SET val = 1 WHERE pk = 1 AND ck = 1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DELETE FROM %s WHERE pk = 1 AND ck = 2"));
    assertRowsNet(executeNet(formatQuery(KEYSPACE_PER_TEST, "SELECT * FROM %s WHERE pk = 1 AND ck = 1")), row(1, 1, 1, "1"));
    assertRowsNet(executeNet("SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1"), row(1, 1, 1, "1"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "TRUNCATE TABLE %s"));
    executeNet(formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE %s ADD val_3 int"));
    executeNet("DROP MATERIALIZED VIEW " + mv);
    executeNet(formatQuery(KEYSPACE_PER_TEST, "DROP TABLE %s"));
    executeNet("ALTER TYPE " + type + " ADD c bigint");
    executeNet("DROP TYPE " + type);
    // Phase 5 (superuser): revoke the grants and recreate fresh objects.
    useSuperUser();
    executeNet("REVOKE ALTER, DROP, MODIFY, SELECT ON KEYSPACE " + KEYSPACE_PER_TEST + " FROM " + user);
    table = KEYSPACE_PER_TEST + "." + createTable(KEYSPACE_PER_TEST, "CREATE TABLE %s (pk int, ck int, val int, val_2 text, PRIMARY KEY (pk, ck))");
    type = KEYSPACE_PER_TEST + "." + createType(KEYSPACE_PER_TEST, "CREATE TYPE %s (a int, b text)");
    index = KEYSPACE_PER_TEST + '.' + createIndex(KEYSPACE_PER_TEST, "CREATE INDEX ON %s (val_2)");
    mv = KEYSPACE_PER_TEST + ".ks_mv_03";
    executeNet("CREATE MATERIALIZED VIEW " + mv + " AS SELECT * FROM " + table + " WHERE val IS NOT NULL AND pk IS NOT NULL AND ck IS NOT NULL PRIMARY KEY (val, pk, ck)");
    // Phase 6 (restricted user): wait for the revoke to propagate, then assert
    // all operations are denied again.
    useUser(user, pass);
    final String spinAssertTable2 = table;
    Util.spinAssertEquals(false, () -> {
        try {
            assertUnauthorizedQuery("User user has no MODIFY permission on <table " + spinAssertTable2 + "> or any of its parents", "INSERT INTO " + spinAssertTable2 + " (pk, ck, val, val_2) VALUES (1, 1, 1, '1')");
        } catch (Throwable e) {
            return true;
        }
        return false;
    }, 10);
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", "UPDATE " + table + " SET val = 1 WHERE pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", "DELETE FROM " + table + " WHERE pk = 1 AND ck = 2");
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", "SELECT * FROM " + table + " WHERE pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no SELECT permission on <table " + table + "> or any of its parents", "SELECT * FROM " + mv + " WHERE val = 1 AND pk = 1 AND ck = 1");
    assertUnauthorizedQuery("User user has no MODIFY permission on <table " + table + "> or any of its parents", "TRUNCATE TABLE " + table);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "ALTER TABLE " + table + " ADD val_3 int"));
    assertUnauthorizedQuery("User user has no DROP permission on <table " + table + "> or any of its parents", formatQuery(KEYSPACE_PER_TEST, "DROP TABLE " + table));
    assertUnauthorizedQuery("User user has no ALTER permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "ALTER TYPE " + type + " ADD c bigint");
    assertUnauthorizedQuery("User user has no DROP permission on <all tables in " + KEYSPACE_PER_TEST + "> or any of its parents", "DROP TYPE " + type);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP MATERIALIZED VIEW " + mv);
    assertUnauthorizedQuery("User user has no ALTER permission on <table " + table + "> or any of its parents", "DROP INDEX " + index);
}
285476.161123cassandra
/**
 * Tests legacy secondary indexes on frozen collection columns: invalid index
 * forms (keys()/full() on a sole partition key, keys() on a frozen map),
 * full() indexes on a frozen list, set and map, the CONTAINS / CONTAINS KEY
 * restriction rules with and without ALLOW FILTERING, and full-value equality
 * lookups through each index.
 */
public void testSecondaryIndex() throws Throwable {
    Util.assumeLegacySecondaryIndex();
    // Invalid index creation: a frozen map as the only partition key cannot be
    // indexed at all, and keys() is not allowed on a frozen map (must be full()).
    createTable("CREATE TABLE %s (a frozen<map<int, text>> PRIMARY KEY, b frozen<map<int, text>>)");
    assertInvalidIndexCreationWithMessage("CREATE INDEX ON %s (full(a))", "Cannot create secondary index on the only partition key column");
    assertInvalidIndexCreationWithMessage("CREATE INDEX ON %s (keys(a))", "Cannot create secondary index on the only partition key column");
    assertInvalidIndexCreationWithMessage("CREATE INDEX ON %s (keys(b))", "Cannot create keys() index on frozen column b. " + "Frozen collections are immutable and must be fully indexed");
    // full() indexes on a frozen list clustering column and frozen set/map regular columns.
    createTable("CREATE TABLE %s (a int, b frozen<list<int>>, c frozen<set<int>>, d frozen<map<int, text>>, PRIMARY KEY (a, b))");
    createIndex("CREATE INDEX ON %s (full(b))");
    createIndex("CREATE INDEX ON %s (full(c))");
    createIndex("CREATE INDEX ON %s (full(d))");
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, list(1, 2, 3), set(1, 2, 3), map(1, "a"));
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 0, list(4, 5, 6), set(1, 2, 3), map(1, "a"));
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, list(1, 2, 3), set(4, 5, 6), map(2, "b"));
    execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", 1, list(4, 5, 6), set(4, 5, 6), map(2, "b"));
    // CONTAINS / CONTAINS KEY restriction rules: KEY only on maps; element
    // CONTAINS on a full()-indexed frozen column requires ALLOW FILTERING.
    assertInvalidMessage("Cannot use CONTAINS KEY on non-map column", "SELECT * FROM %s WHERE b CONTAINS KEY ?", 1);
    assertInvalidMessage("Cannot use CONTAINS KEY on non-map column", "SELECT * FROM %s WHERE b CONTAINS KEY ? ALLOW FILTERING", 1);
    assertInvalidMessage("Cannot use CONTAINS KEY on non-map column", "SELECT * FROM %s WHERE c CONTAINS KEY ?", 1);
    assertInvalidMessage("Clustering column restrictions require the use of secondary indices or" + " filtering for map-element restrictions and for the following operators: CONTAINS, CONTAINS KEY, LIKE, ANN", "SELECT * FROM %s WHERE b CONTAINS ?", 1);
    assertRows(execute("SELECT * FROM %s WHERE b CONTAINS ? ALLOW FILTERING", 1), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(1, list(1, 2, 3), set(4, 5, 6), map(2, "b")));
    assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE d CONTAINS KEY ?", 1);
    assertRows(execute("SELECT * FROM %s WHERE b CONTAINS ? AND d CONTAINS KEY ? ALLOW FILTERING", 1, 1), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    // Full-value equality lookups through the index on frozen list b,
    // combined with extra restrictions that need ALLOW FILTERING.
    assertRows(execute("SELECT * FROM %s WHERE b=?", list(1, 2, 3)), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(1, list(1, 2, 3), set(4, 5, 6), map(2, "b")));
    assertEmpty(execute("SELECT * FROM %s WHERE b=?", list(-1)));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE b=? AND c=?", list(1, 2, 3), set(4, 5, 6));
    assertRows(execute("SELECT * FROM %s WHERE b=? AND c=? ALLOW FILTERING", list(1, 2, 3), set(4, 5, 6)), row(1, list(1, 2, 3), set(4, 5, 6), map(2, "b")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE b=? AND c CONTAINS ?", list(1, 2, 3), 5);
    assertRows(execute("SELECT * FROM %s WHERE b=? AND c CONTAINS ? ALLOW FILTERING", list(1, 2, 3), 5), row(1, list(1, 2, 3), set(4, 5, 6), map(2, "b")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE b=? AND d=?", list(1, 2, 3), map(1, "a"));
    assertRows(execute("SELECT * FROM %s WHERE b=? AND d=? ALLOW FILTERING", list(1, 2, 3), map(1, "a")), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE b=? AND d CONTAINS ?", list(1, 2, 3), "a");
    assertRows(execute("SELECT * FROM %s WHERE b=? AND d CONTAINS ? ALLOW FILTERING", list(1, 2, 3), "a"), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE b=? AND d CONTAINS KEY ?", list(1, 2, 3), 1);
    assertRows(execute("SELECT * FROM %s WHERE b=? AND d CONTAINS KEY ? ALLOW FILTERING", list(1, 2, 3), 1), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    // Lookups through the index on frozen set c (set equality is order-insensitive).
    assertRows(execute("SELECT * FROM %s WHERE c=?", set(1, 2, 3)), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
    assertRows(execute("SELECT * FROM %s WHERE c=?", set(2, 1, 3)), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
    assertEmpty(execute("SELECT * FROM %s WHERE c=?", set(-1)));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE c=? AND b=?", set(1, 2, 3), list(1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE c=? AND b=? ALLOW FILTERING", set(1, 2, 3), list(1, 2, 3)), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE c=? AND b CONTAINS ?", set(1, 2, 3), 1);
    assertRows(execute("SELECT * FROM %s WHERE c=? AND b CONTAINS ? ALLOW FILTERING", set(1, 2, 3), 1), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE c=? AND d = ?", set(1, 2, 3), map(1, "a"));
    assertRows(execute("SELECT * FROM %s WHERE c=? AND d = ? ALLOW FILTERING", set(1, 2, 3), map(1, "a")), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE c=? AND d CONTAINS ?", set(1, 2, 3), "a");
    assertRows(execute("SELECT * FROM %s WHERE c=? AND d CONTAINS ? ALLOW FILTERING", set(1, 2, 3), "a"), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE c=? AND d CONTAINS KEY ?", set(1, 2, 3), 1);
    assertRows(execute("SELECT * FROM %s WHERE c=? AND d CONTAINS KEY ? ALLOW FILTERING", set(1, 2, 3), 1), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
    // Lookups through the index on frozen map d.
    assertRows(execute("SELECT * FROM %s WHERE d=?", map(1, "a")), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
    assertRows(execute("SELECT * FROM %s WHERE d=?", map(2, "b")), row(1, list(1, 2, 3), set(4, 5, 6), map(2, "b")), row(1, list(4, 5, 6), set(4, 5, 6), map(2, "b")));
    assertEmpty(execute("SELECT * FROM %s WHERE d=?", map(3, "c")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE d=? AND c=?", map(1, "a"), set(1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE d=? AND b=? ALLOW FILTERING", map(1, "a"), list(1, 2, 3)), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE d=? AND b CONTAINS ?", map(1, "a"), 3);
    assertRows(execute("SELECT * FROM %s WHERE d=? AND b CONTAINS ? ALLOW FILTERING", map(1, "a"), 3), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertInvalidMessage("ALLOW FILTERING", "SELECT * FROM %s WHERE d=? AND b=? AND c=?", map(1, "a"), list(1, 2, 3), set(1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE d=? AND b=? AND c=? ALLOW FILTERING", map(1, "a"), list(1, 2, 3), set(1, 2, 3)), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertRows(execute("SELECT * FROM %s WHERE d=? AND b CONTAINS ? AND c CONTAINS ? ALLOW FILTERING", map(1, "a"), 2, 2), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")));
    assertRows(execute("SELECT * FROM %s WHERE d CONTAINS KEY ? ALLOW FILTERING", 1), row(0, list(1, 2, 3), set(1, 2, 3), map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
    // Deleting column d removes that row from the d index.
    execute("DELETE d FROM %s WHERE a=? AND b=?", 0, list(1, 2, 3));
    assertRows(execute("SELECT * FROM %s WHERE d=?", map(1, "a")), row(0, list(4, 5, 6), set(1, 2, 3), map(1, "a")));
}
281859.721159cassandra
/**
 * Exercises query equivalence on a wide table with a composite partition key
 * (k1, k2) and a three-column clustering key (c1, c2, c3): projections, single
 * and multi-column clustering slices, tuple relations, token ranges, ALLOW
 * FILTERING, secondary indexes, IN expansion, and ORDER BY normalization.
 * Each test(q, alts...) call asserts q behaves like its alternative forms.
 *
 * Fix: the c1 and c2 slice groups each repeated the "> lo AND < hi" case
 * instead of covering ">= lo AND <= hi"; the c3 group (which covers all four
 * bound combinations) shows the intended pattern, so the duplicates were
 * corrected to the missing inclusive-inclusive combination.
 */
public void testWideTableWithMulticolumnKey() throws Throwable {
    createTable("CREATE TABLE %s (k1 int, k2 int, c1 int, c2 int, c3 int, v1 int, v2 int, PRIMARY KEY((k1, k2), c1, c2, c3))");
    // Projections: selecting key columns alone is equivalent to SELECT *.
    test("SELECT * FROM %s");
    test("SELECT k1 FROM %s", "SELECT * FROM %s");
    test("SELECT k2 FROM %s", "SELECT * FROM %s");
    test("SELECT c1 FROM %s", "SELECT * FROM %s");
    test("SELECT c2 FROM %s", "SELECT * FROM %s");
    test("SELECT c3 FROM %s", "SELECT * FROM %s");
    test("SELECT v1 FROM %s");
    test("SELECT v2 FROM %s");
    test("SELECT k1, k2, c1, c2, c3, v1, v2 FROM %s", "SELECT v1, v2 FROM %s");
    // Full partition key restriction with various projections.
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT k1 FROM %s WHERE k1 = 1 AND k2 = 2", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT k2 FROM %s WHERE k1 = 1 AND k2 = 2", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT c1 FROM %s WHERE k1 = 1 AND k2 = 2", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT c2 FROM %s WHERE k1 = 1 AND k2 = 2", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT v1 FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT v2 FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT k1, k2, c1, c2, v1, v2 FROM %s WHERE k1 = 1 AND k2 = 2", "SELECT v1, v2 FROM %s WHERE k1 = 1 AND k2 = 2");
    // Slices on the first clustering column c1: all four bound combinations.
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 < 1");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 > 1");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 <= 1");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 >= 1");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 > 1 AND c1 < 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 > 1 AND c1 <= 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 >= 1 AND c1 < 2");
    // Fixed: was a duplicate of the "> 1 AND < 2" case; >= / <= combination was missing.
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 >= 1 AND c1 <= 2");
    // Slices on c2 with c1 fixed.
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 < 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 > 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 <= 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 >= 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 > 2 AND c2 < 3");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 > 2 AND c2 <= 3");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 >= 2 AND c2 < 3");
    // Fixed: was a duplicate of the "> 2 AND < 3" case; >= / <= combination was missing.
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 >= 2 AND c2 <= 3");
    // Slices on c3 with c1, c2 fixed; full-equality also matches the tuple form.
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 = 3", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND (c1, c2, c3) = (1, 2, 3)");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 > 3");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 < 3");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 >= 3");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 <= 3");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 > 3 AND c3 < 4");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 > 3 AND c3 <= 4");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 >= 3 AND c3 < 4");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND c2 = 2 AND c3 >= 3 AND c3 <= 4");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND (c1, c2, c3) = (1, 2, 3)");
    // Token-range restrictions over the composite partition key.
    test("SELECT * FROM %s WHERE token(k1, k2) > 0");
    test("SELECT * FROM %s WHERE token(k1, k2) < 0");
    test("SELECT * FROM %s WHERE token(k1, k2) >= 0");
    test("SELECT * FROM %s WHERE token(k1, k2) <= 0");
    test("SELECT * FROM %s WHERE token(k1, k2) = 0", "SELECT * FROM %s WHERE token(k1, k2) >= 0 AND token(k1, k2) <= 0");
    // ALLOW FILTERING on partial keys and regular columns (no indexes yet).
    test("SELECT * FROM %s WHERE k1 = 1 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE k2 = 1 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE c1 = 1 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE c2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE c3 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE v1 = 1 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE v2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND v1 = 1 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND v1 = 1 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND c1 = 1 AND v1 = 1 AND v2 = 2 ALLOW FILTERING");
    // With secondary indexes on every column, the same predicates work without filtering.
    createIndex("CREATE INDEX ON %s (k1)");
    createIndex("CREATE INDEX ON %s (k2)");
    createIndex("CREATE INDEX ON %s (c1)");
    createIndex("CREATE INDEX ON %s (c2)");
    createIndex("CREATE INDEX ON %s (c3)");
    createIndex("CREATE INDEX ON %s (v1)");
    createIndex("CREATE INDEX ON %s (v2)");
    test("SELECT * FROM %s WHERE k1 = 1");
    test("SELECT * FROM %s WHERE k2 = 2");
    test("SELECT * FROM %s WHERE c1 = 1");
    test("SELECT * FROM %s WHERE c2 = 2");
    test("SELECT * FROM %s WHERE c3 = 3");
    test("SELECT * FROM %s WHERE v1 = 1");
    test("SELECT * FROM %s WHERE v2 = 2");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT * FROM %s WHERE c1 = 1 AND c2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE c1 = 1 AND c2 = 2 AND c3 = 3 ALLOW FILTERING", "SELECT * FROM %s WHERE (c1, c2, c3) = (1, 2, 3) ALLOW FILTERING");
    test("SELECT * FROM %s WHERE v1 = 1 AND v2 = 2 ALLOW FILTERING");
    test("SELECT * FROM %s WHERE token(k1, k2) > 0 AND v1 = 1");
    // Key equality combined with an indexed column equals the token-pinned form.
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND v1 = 1", "SELECT * FROM %s WHERE token(k1, k2) >= token(1, 2) AND token(k1, k2) <= token(1, 2) AND v1 = 1");
    test("SELECT * FROM %s WHERE k1 = 1 AND k2 = 2 AND c1 = 1 AND v1 = 1", "SELECT * FROM %s WHERE token(k1, k2) >= token(1, 2) AND token(k1, k2) <= token(1, 2) AND c1 = 1 AND v1 = 1 ALLOW FILTERING");
    // IN on partition key columns expands to the cross product of equality queries.
    test("SELECT * FROM %s WHERE k1 IN (1) AND k2 IN (2)", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 2");
    test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4)", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4");
    test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4) AND c1 = 0", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3 AND c1 = 0", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4 AND c1 = 0", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3 AND c1 = 0", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4 AND c1 = 0");
    test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4) AND c1 > 0", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3 AND c1 > 0", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4 AND c1 > 0", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3 AND c1 > 0", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4 AND c1 > 0");
    test("SELECT * FROM %s WHERE k1 IN (1, 2) AND k2 IN (3, 4) AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 3 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))", "SELECT * FROM %s WHERE k1 = 1 AND k2 = 4 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 3 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))", "SELECT * FROM %s WHERE k1 = 2 AND k2 = 4 AND (c1, c2, c3) IN ((5, 6, 7), (8, 9, 10))");
    // ORDER BY normalization: ASC prefixes reduce to no ORDER BY; DESC on c1
    // expands to DESC on the full clustering.
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 ASC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 DESC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 DESC, c2 DESC, c3 DESC");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1, c2", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1, c2 ASC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 ASC, c2", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 ASC, c2 ASC, c3 ASC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 ORDER BY c1 DESC, c2 DESC, c3 DESC");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 ASC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 DESC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 DESC, c2 DESC, c3 DESC");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1, c2", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1, c2 ASC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 ASC, c2", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 ASC, c2 ASC, c3 ASC", "SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1");
    test("SELECT * FROM %s WHERE k1 = 0 AND k2 = 2 AND c1 = 1 ORDER BY c1 DESC, c2 DESC, c3 DESC");
}
284477.861126cassandra
public void testDiff() {
    // Exercises CounterContext.diff() over contexts built purely of remote shards:
    // EQUAL (identical), GREATER_THAN / LESS_THAN (one side dominates by id set or
    // by clock), and DISJOINT (neither side dominates).
    ContextState left;
    ContextState right;
    // Byte-for-byte identical contexts -> EQUAL.
    left = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 1 } });
    right = ContextState.wrap(ByteBufferUtil.clone(left.context));
    assertEquals(Relationship.EQUAL, cc.diff(left.context, right.context));
    // Left holds a strict superset of right's counter ids -> GREATER_THAN.
    left = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 1 }, { 12, 0 } });
    right = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 1 } });
    assertEquals(Relationship.GREATER_THAN, cc.diff(left.context, right.context));
    // Right holds a strict superset of left's counter ids -> LESS_THAN.
    left = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 1 } });
    right = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 1 }, { 12, 0 } });
    assertEquals(Relationship.LESS_THAN, cc.diff(left.context, right.context));
    // Same id sets, left's clocks dominate -> GREATER_THAN.
    left = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 3 } });
    right = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 1 } });
    assertEquals(Relationship.GREATER_THAN, cc.diff(left.context, right.context));
    // Same id sets, right's clocks dominate -> LESS_THAN.
    left = remoteState(new long[][] { { 3, 3 }, { 6, 2 }, { 9, 3 } });
    right = remoteState(new long[][] { { 3, 3 }, { 6, 9 }, { 9, 3 } });
    assertEquals(Relationship.LESS_THAN, cc.diff(left.context, right.context));
    // Overlapping but non-nested id sets -> DISJOINT.
    left = remoteState(new long[][] { { 3, 1 }, { 4, 1 }, { 9, 1 } });
    right = remoteState(new long[][] { { 3, 1 }, { 6, 1 }, { 9, 1 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
    // Mostly distinct id sets -> DISJOINT.
    left = remoteState(new long[][] { { 3, 1 }, { 4, 1 }, { 9, 1 } });
    right = remoteState(new long[][] { { 2, 1 }, { 6, 1 }, { 12, 1 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
    // Same id sets but each side dominates a different clock -> DISJOINT.
    left = remoteState(new long[][] { { 3, 1 }, { 6, 3 }, { 9, 1 } });
    right = remoteState(new long[][] { { 3, 1 }, { 6, 1 }, { 9, 5 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
    left = remoteState(new long[][] { { 3, 2 }, { 6, 3 }, { 9, 1 } });
    right = remoteState(new long[][] { { 3, 1 }, { 6, 9 }, { 9, 5 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
    // One side has extra ids while the other has higher clocks -> DISJOINT.
    left = remoteState(new long[][] { { 3, 2 }, { 6, 3 }, { 9, 1 }, { 12, 1 } });
    right = remoteState(new long[][] { { 3, 4 }, { 6, 9 }, { 9, 5 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
    left = remoteState(new long[][] { { 3, 5 }, { 6, 3 }, { 9, 2 } });
    right = remoteState(new long[][] { { 3, 4 }, { 6, 3 }, { 9, 2 }, { 12, 1 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
    left = remoteState(new long[][] { { 3, 5 }, { 6, 2 }, { 9, 2 } });
    right = remoteState(new long[][] { { 3, 4 }, { 6, 3 }, { 9, 2 }, { 12, 1 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
    left = remoteState(new long[][] { { 3, 5 }, { 6, 2 }, { 7, 2 }, { 9, 2 } });
    right = remoteState(new long[][] { { 3, 4 }, { 6, 3 }, { 9, 2 } });
    assertEquals(Relationship.DISJOINT, cc.diff(left.context, right.context));
}

/**
 * Builds a counter context containing only remote shards, written in row order.
 * Each row of {@code shards} is {counterId, clock}; the count is always 0.
 */
private static ContextState remoteState(long[][] shards) {
    ContextState state = ContextState.allocate(0, 0, shards.length);
    for (long[] shard : shards) {
        state.writeRemote(CounterId.fromInt((int) shard[0]), shard[1], 0L);
    }
    return state;
}
282595.8718115elasticsearch
public void testRoundRobinRetryErrors() throws Exception {
    // Exercises the client's retry behaviour: a retryable error must be retried
    // once on every node (round-robin), while a non-retryable status code must
    // leave the responding host selected for subsequent requests.
    RestClient restClient = createRestClient(NodeSelector.ANY);
    String retryEndpoint = randomErrorRetryEndpoint();
    try {
        RestClientSingleHostTests.performRequestSyncOrAsync(restClient, new Request(randomHttpMethod(getRandom()), retryEndpoint));
        fail("request should have failed");
    } catch (ResponseException e) {
        // Retryable HTTP error: each earlier attempt is chained as a suppressed
        // ResponseException. Walk the chain and tick each host off the set so a
        // host used twice (or never) fails the test.
        Set<HttpHost> hostsSet = hostsSet();
        failureListener.assertCalled(nodes);
        do {
            Response response = e.getResponse();
            // The expected status code is encoded in the endpoint path, e.g. "/503".
            assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode());
            assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", hostsSet.remove(response.getHost()));
            if (e.getSuppressed().length > 0) {
                // Exactly one suppressed exception per hop in the retry chain.
                assertEquals(1, e.getSuppressed().length);
                Throwable suppressed = e.getSuppressed()[0];
                assertThat(suppressed, instanceOf(ResponseException.class));
                e = (ResponseException) suppressed;
            } else {
                e = null;
            }
        } while (e != null);
        assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
    } catch (IOException e) {
        // Connection-level failure instead of an HTTP error: same round-robin
        // expectation, but the failing host is carried in the exception message.
        Set<HttpHost> hostsSet = hostsSet();
        failureListener.assertCalled(nodes);
        do {
            HttpHost httpHost = HttpHost.create(e.getMessage());
            assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
            if (e.getSuppressed().length > 0) {
                assertEquals(1, e.getSuppressed().length);
                Throwable suppressed = e.getSuppressed()[0];
                assertThat(suppressed, instanceOf(IOException.class));
                e = (IOException) suppressed;
            } else {
                e = null;
            }
        } while (e != null);
        assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
    }
    // All nodes are now marked dead. Further rounds of requests must again use
    // each host exactly once per round, with no retries chained (one failure
    // per request, reported individually to the failure listener).
    int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
    for (int i = 1; i <= numIters; i++) {
        Set<HttpHost> hostsSet = hostsSet();
        for (int j = 0; j < nodes.size(); j++) {
            retryEndpoint = randomErrorRetryEndpoint();
            try {
                RestClientSingleHostTests.performRequestSyncOrAsync(restClient, new Request(randomHttpMethod(getRandom()), retryEndpoint));
                fail("request should have failed");
            } catch (ResponseException e) {
                Response response = e.getResponse();
                assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
                assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", hostsSet.remove(response.getHost()));
                failureListener.assertCalled(response.getHost());
                assertEquals(0, e.getSuppressed().length);
            } catch (IOException e) {
                HttpHost httpHost = HttpHost.create(e.getMessage());
                assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
                failureListener.assertCalled(httpHost);
                assertEquals(0, e.getSuppressed().length);
            }
        }
        assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
        if (getRandom().nextBoolean()) {
            // A non-retryable error status leaves the host alive, so the client
            // should keep using the same host for subsequent requests...
            HttpHost selectedHost = null;
            int iters = RandomNumbers.randomIntBetween(getRandom(), 2, 10);
            for (int y = 0; y < iters; y++) {
                int statusCode = randomErrorNoRetryStatusCode(getRandom());
                Response response;
                try {
                    response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, new Request(randomHttpMethod(getRandom()), "/" + statusCode));
                } catch (ResponseException e) {
                    response = e.getResponse();
                }
                assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
                if (selectedHost == null) {
                    selectedHost = response.getHost();
                } else {
                    assertThat(response.getHost(), equalTo(selectedHost));
                }
            }
            failureListener.assertNotCalled();
            // ...and retryable errors must now repeatedly hit that same selected
            // host (the other hosts are still within their dead timeout).
            for (int y = 0; y < i + 1; y++) {
                retryEndpoint = randomErrorRetryEndpoint();
                try {
                    RestClientSingleHostTests.performRequestSyncOrAsync(restClient, new Request(randomHttpMethod(getRandom()), retryEndpoint));
                    fail("request should have failed");
                } catch (ResponseException e) {
                    Response response = e.getResponse();
                    assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
                    assertThat(response.getHost(), equalTo(selectedHost));
                    failureListener.assertCalled(selectedHost);
                } catch (IOException e) {
                    HttpHost httpHost = HttpHost.create(e.getMessage());
                    assertThat(httpHost, equalTo(selectedHost));
                    failureListener.assertCalled(selectedHost);
                }
            }
        }
    }
}
282621.844137elasticsearch
public void testPhrasePrefix() throws IOException {
    // Index settings defining a "synonym" analyzer: standard tokenizer plus a
    // synonym filter rewriting "quick" => "fast", followed by lowercasing.
    Settings.Builder settings = Settings.builder()
        .put(indexSettings())
        .put("index.analysis.analyzer.synonym.tokenizer", "standard")
        .putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
        .put("index.analysis.filter.synonym.type", "synonym")
        .putList("index.analysis.filter.synonym.synonyms", "quick => fast");
    assertAcked(prepareCreate("first_test_index").setSettings(settings.build()).setMapping(type1TermVectorMapping()));
    ensureGreen();
    prepareIndex("first_test_index").setId("0")
        .setSource("field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog")
        .get();
    prepareIndex("first_test_index").setId("1").setSource("field1", "The quick browse button is a fancy thing, right bro?").get();
    refresh();
    logger.info("--> highlighting and searching on field0");
    // Single-term phrase prefix: "bro" should highlight only "brown".
    assertResponse(
        prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field0", "bro"))
            .highlighter(highlight().field("field0").order("score").preTags("<x>").postTags("</x>")),
        response -> assertHighlight(response, 0, "field0", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"))
    );
    // Two-term phrase prefix highlights the whole matched phrase.
    assertResponse(
        prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field0", "quick bro"))
            .highlighter(highlight().field("field0").order("score").preTags("<x>").postTags("</x>")),
        response -> assertHighlight(response, 0, "field0", 0, 1, equalTo("The <x>quick brown</x> fox jumps over the lazy dog"))
    );
    logger.info("--> highlighting and searching on field1");
    // A bool query of two phrase-prefix shoulds matches both docs.
    assertResponse(
        prepareSearch("first_test_index")
            .setQuery(boolQuery().should(matchPhrasePrefixQuery("field1", "test")).should(matchPhrasePrefixQuery("field1", "bro")))
            .highlighter(highlight().field("field1").order("score").preTags("<x>").postTags("</x>")),
        response -> {
            assertThat(response.getHits().getTotalHits().value, equalTo(2L));
            for (int hit = 0; hit < 2; hit++) {
                assertHighlight(response, hit, "field1", 0, 1, anyOf(equalTo("The quick <x>browse</x> button is a fancy thing, right <x>bro</x>?"), equalTo("The quick <x>brown</x> fox jumps over the lazy dog")));
            }
        }
    );
    assertResponse(
        prepareSearch("first_test_index").setQuery(matchPhrasePrefixQuery("field1", "quick bro"))
            .highlighter(highlight().field("field1").order("score").preTags("<x>").postTags("</x>")),
        response -> {
            for (int hit = 0; hit < 2; hit++) {
                assertHighlight(response, hit, "field1", 0, 1, anyOf(equalTo("The <x>quick browse</x> button is a fancy thing, right bro?"), equalTo("The <x>quick brown</x> fox jumps over the lazy dog")));
            }
        }
    );
    // Second index: field4 stores term vectors (positions + offsets), field3 does not.
    assertAcked(prepareCreate("second_test_index").setSettings(settings.build())
        .setMapping("field4", "type=text,term_vector=with_positions_offsets,analyzer=synonym", "field3", "type=text,analyzer=synonym"));
    prepareIndex("second_test_index").setId("0")
        .setSource("type", "type2", "field4", "The quick brown fox jumps over the lazy dog", "field3", "The quick brown fox jumps over the lazy dog")
        .get();
    prepareIndex("second_test_index").setId("1").setSource("type", "type2", "field4", "The quick browse button is a fancy thing, right bro?").get();
    prepareIndex("second_test_index").setId("2").setSource("type", "type2", "field4", "a quick fast blue car").get();
    refresh();
    // "fast" matches "quick" through the synonym rewrite.
    assertResponse(
        prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field3", "fast bro"))
            .highlighter(highlight().field("field3").order("score").preTags("<x>").postTags("</x>")),
        response -> assertHighlight(response, 0, "field3", 0, 1, equalTo("The <x>quick brown</x> fox jumps over the lazy dog"))
    );
    assertResponse(
        prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field4", "the fast bro"))
            .highlighter(highlight().field("field4").order("score").preTags("<x>").postTags("</x>")),
        response -> {
            for (int hit = 0; hit < 2; hit++) {
                assertHighlight(response, hit, "field4", 0, 1, anyOf(equalTo("<x>The quick browse</x> button is a fancy thing, right bro?"), equalTo("<x>The quick brown</x> fox jumps over the lazy dog")));
            }
        }
    );
    logger.info("--> highlighting and searching on field4");
    assertResponse(
        prepareSearch("second_test_index").setQuery(matchPhrasePrefixQuery("field4", "a fast quick blue ca"))
            .setPostFilter(termQuery("type", "type2"))
            .highlighter(highlight().field("field4").order("score").preTags("<x>").postTags("</x>")),
        response -> assertHighlight(response, 0, "field4", 0, 1, anyOf(equalTo("<x>a quick fast blue car</x>"), equalTo("<x>a</x> <x>quick</x> <x>fast</x> <x>blue</x> <x>car</x>")))
    );
}
281860.324154elasticsearch
public void testMultiTermPrefix() throws IOException {
    // For each shingle size, the shingle+prefix analyzer must emit every shingle
    // of the input together with all of that shingle's character prefixes; the
    // trailing (space-padded) shingles are produced by tokenWithSpaces().
    testCase("quick brown fox jump lazy", shingleSize -> SearchAsYouTypeAnalyzer.withShingleAndPrefix(SIMPLE, shingleSize), shingleSize -> {
        final List<String> expected = new ArrayList<>();
        switch (shingleSize) {
            case 2:
                expected.addAll(asList("q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown"));
                expected.addAll(asList("b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox"));
                expected.addAll(asList("f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump"));
                expected.addAll(asList("j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy"));
                expected.addAll(asList("l", "la", "laz", "lazy"));
                expected.addAll(tokenWithSpaces("lazy", shingleSize));
                return expected;
            case 3:
                expected.addAll(asList("q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown", "quick brown ", "quick brown f", "quick brown fo", "quick brown fox"));
                expected.addAll(asList("b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox", "brown fox ", "brown fox j", "brown fox ju", "brown fox jum", "brown fox jump"));
                expected.addAll(asList("f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump", "fox jump ", "fox jump l", "fox jump la", "fox jump laz", "fox jump lazy"));
                expected.addAll(asList("j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy"));
                expected.addAll(tokenWithSpaces("jump lazy", shingleSize - 1));
                expected.addAll(asList("l", "la", "laz", "lazy"));
                expected.addAll(tokenWithSpaces("lazy", shingleSize));
                return expected;
            case 4:
                expected.addAll(asList("q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown", "quick brown ", "quick brown f", "quick brown fo", "quick brown fox", "quick brown fox ", "quick brown fox j", "quick brown fox ju", "quick brown fox jum", "quick brown fox jump"));
                expected.addAll(asList("b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox", "brown fox ", "brown fox j", "brown fox ju", "brown fox jum", "brown fox jump", "brown fox jump ", "brown fox jump l", "brown fox jump la", "brown fox jump laz", "brown fox jump lazy"));
                expected.addAll(asList("f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump", "fox jump ", "fox jump l", "fox jump la", "fox jump laz", "fox jump lazy"));
                expected.addAll(tokenWithSpaces("fox jump lazy", shingleSize - 2));
                expected.addAll(asList("j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy"));
                expected.addAll(tokenWithSpaces("jump lazy", shingleSize - 1));
                expected.addAll(asList("l", "la", "laz", "lazy"));
                expected.addAll(tokenWithSpaces("lazy", shingleSize));
                return expected;
            default:
                // Only shingle sizes 2-4 are driven by testCase().
                throw new IllegalArgumentException();
        }
    });
}
281604.7611150elasticsearch
public void testCorruptTranslogTruncation() throws Exception {
    // End-to-end check of the "remove corrupted shard data" CLI: corrupt the
    // primary's translog, truncate it with the tool while the node is down,
    // force-allocate the stale primary, then verify that only the flushed docs
    // survive and that the replica performs a file-based recovery.
    internalCluster().startNodes(2);
    final String node1 = internalCluster().getNodeNames()[0];
    final String node2 = internalCluster().getNodeNames()[1];
    final String indexName = "test";
    // 1 shard / 1 replica; manual refresh only, keep the translog on close, and
    // pin the primary to node1 initially by excluding node2.
    assertAcked(prepareCreate(indexName).setSettings(indexSettings(1, 1).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1").put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true).put("index.routing.allocation.exclude._name", node2)));
    ensureYellow();
    // Lift the exclusion so the replica can be assigned to node2.
    updateIndexSettings(Settings.builder().putNull("index.routing.allocation.exclude._name"), indexName);
    ensureGreen();
    int numDocsToKeep = randomIntBetween(10, 100);
    logger.info("--> indexing [{}] docs to be kept", numDocsToKeep);
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocsToKeep];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = prepareIndex(indexName).setSource("foo", "bar");
    }
    indexRandom(false, false, false, Arrays.asList(builders));
    // Flush so the kept docs are persisted in Lucene, then stop translog
    // flushing so the next batch lives only in the translog (and can be lost).
    flush(indexName);
    disableTranslogFlush(indexName);
    int numDocsToTruncate = randomBoolean() ? 0 : randomIntBetween(0, 100);
    logger.info("--> indexing [{}] more doc to be truncated", numDocsToTruncate);
    builders = new IndexRequestBuilder[numDocsToTruncate];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = prepareIndex(indexName).setSource("foo", "bar");
    }
    indexRandom(false, false, false, Arrays.asList(builders));
    RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
    MockTerminal terminal = MockTerminal.create();
    OptionParser parser = command.getParser();
    if (randomBoolean() && numDocsToTruncate > 0) {
        // Occasionally flush the replica so its copy diverges from the primary's.
        Index index = resolveIndex(indexName);
        IndexShard replica = internalCluster().getInstance(IndicesService.class, node2).getShardOrNull(new ShardId(index, 0));
        replica.flush(new FlushRequest());
        logger.info("--> performed extra flushing on replica");
    }
    // Remember both nodes' data paths so they can be restarted/re-created later.
    final Settings node1PathSettings = internalCluster().dataPathSettings(node1);
    final Settings node2PathSettings = internalCluster().dataPathSettings(node2);
    internalCluster().stopNode(node2);
    final Path translogDir = getPathToShardData(indexName, ShardPath.TRANSLOG_FOLDER_NAME);
    final Path indexDir = getPathToShardData(indexName, ShardPath.INDEX_FOLDER_NAME);
    logger.info("--> restarting node");
    internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {

        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            // Corrupt a translog file while the only remaining data node is down.
            logger.info("--> corrupting translog on node {}", nodeName);
            TestTranslog.corruptRandomTranslogFile(logger, random(), translogDir);
            return super.onNodeStopped(nodeName);
        }
    });
    // The shard must fail allocation with a TranslogCorruptedException.
    assertBusy(() -> {
        final UnassignedInfo unassignedInfo = clusterAdmin().prepareAllocationExplain().setIndex(indexName).setShard(0).setPrimary(true).get().getExplanation().getUnassignedInfo();
        assertThat(unassignedInfo.getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
        assertThat(ExceptionsHelper.unwrap(unassignedInfo.getFailure(), TranslogCorruptedException.class), not(nullValue()));
    });
    internalCluster().restartNode(node1, new InternalTestCluster.RestartCallback() {

        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            // The CLI cannot run against a locked shard; wait until the stopped
            // node has released the Lucene write lock on the index directory.
            assertBusy(() -> {
                logger.info("--> checking that lock has been released for {}", indexDir);
                try (Directory dir = FSDirectory.open(indexDir, NativeFSLockFactory.INSTANCE);
                    Lock ignored = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
                } catch (LockObtainFailedException lofe) {
                    logger.info("--> failed acquiring lock for {}", indexDir);
                    throw new AssertionError("still waiting for lock release at [" + indexDir + "]", lofe);
                } catch (IOException ioe) {
                    throw new AssertionError("unexpected IOException [" + indexDir + "]", ioe);
                }
            });
            final Environment environment = TestEnvironment.newEnvironment(Settings.builder().put(internalCluster().getDefaultSettings()).put(node1PathSettings).build());
            // "y" answers the tool's data-loss confirmation prompt.
            terminal.addTextInput("y");
            OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
            final ProcessInfo processInfo = new ProcessInfo(Map.of(), Map.of(), createTempDir());
            logger.info("--> running command for [{}]", translogDir.toAbsolutePath());
            command.execute(terminal, options, environment, processInfo);
            logger.info("--> output:\n{}", terminal.getOutput());
            return super.onNodeStopped(nodeName);
        }
    });
    // Resolve node1's node id: the tool's output must suggest force-allocating
    // the stale primary on that node.
    String primaryNodeId = null;
    final ClusterState state = clusterAdmin().prepareState().get().getState();
    final DiscoveryNodes nodes = state.nodes();
    for (Map.Entry<String, DiscoveryNode> cursor : nodes.getNodes().entrySet()) {
        final String name = cursor.getValue().getName();
        if (name.equals(node1)) {
            primaryNodeId = cursor.getKey();
            break;
        }
    }
    assertThat(primaryNodeId, notNullValue());
    assertThat(terminal.getOutput(), containsString("allocate_stale_primary"));
    assertThat(terminal.getOutput(), containsString("\"node\" : \"" + primaryNodeId + "\""));
    // The truncated shard cannot be allocated automatically: no valid copy left.
    assertBusy(() -> {
        final ClusterAllocationExplanation explanation = clusterAdmin().prepareAllocationExplain().setIndex(indexName).setShard(0).setPrimary(true).get().getExplanation();
        final ShardAllocationDecision shardAllocationDecision = explanation.getShardAllocationDecision();
        assertThat(shardAllocationDecision.isDecisionTaken(), equalTo(true));
        assertThat(shardAllocationDecision.getAllocateDecision().getAllocationDecision(), equalTo(AllocationDecision.NO_VALID_SHARD_COPY));
    });
    // Follow the tool's advice: force-allocate the stale primary, accepting loss.
    clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, primaryNodeId, true)).get();
    assertBusy(() -> {
        final ClusterAllocationExplanation explanation = clusterAdmin().prepareAllocationExplain().setIndex(indexName).setShard(0).setPrimary(true).get().getExplanation();
        assertThat(explanation.getCurrentNode(), notNullValue());
        assertThat(explanation.getShardState(), equalTo(ShardRoutingState.STARTED));
    });
    ensureYellow(indexName);
    // Only the flushed batch survives; the translog-only docs were truncated away.
    assertHitCount(prepareSearch(indexName).setQuery(matchAllQuery()), numDocsToKeep);
    logger.info("--> starting the replica node to test recovery");
    internalCluster().startNode(node2PathSettings);
    ensureGreen(indexName);
    // Both shard copies must agree on the surviving doc count.
    for (String node : internalCluster().nodesInclude(indexName)) {
        SearchRequestBuilder q = prepareSearch(indexName).setPreference("_only_nodes:" + node).setQuery(matchAllQuery());
        assertHitCount(q, numDocsToKeep);
    }
    // The replica must have performed a file-based recovery (segment files copied).
    final RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries(indexName).setActiveOnly(false).get();
    final RecoveryState replicaRecoveryState = recoveryResponse.shardRecoveryStates().get(indexName).stream().filter(recoveryState -> recoveryState.getPrimary() == false).findFirst().get();
    assertThat(replicaRecoveryState.getIndex().toString(), replicaRecoveryState.getIndex().recoveredFileCount(), greaterThan(0));
    // After recovery the local and global checkpoints must have converged.
    final SeqNoStats seqNoStats = getSeqNoStats(indexName, 0);
    assertThat(seqNoStats.getGlobalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
    assertThat(seqNoStats.getLocalCheckpoint(), equalTo(seqNoStats.getMaxSeqNo()));
}
284312.791134elasticsearch
public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
    // Creates an 11-shard / 1-replica index on a growing (then shrinking) cluster
    // and verifies shard allocation stays balanced at every step.
    Settings settings = Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 11).put(SETTING_NUMBER_OF_REPLICAS, 1).build();
    // Fixed typo in log message: "sever1" -> "server1".
    logger.info("Starting server1");
    final String server_1 = internalCluster().startNode();
    final String node1 = getLocalNodeId(server_1);
    logger.info("Creating index [test]");
    CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet();
    assertAcked(createIndexResponse);
    ClusterState clusterState = clusterAdmin().prepareState().get().getState();
    RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
    // With a single node only the 11 primaries can be assigned.
    assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
    logger.info("Starting server2");
    String server_2 = internalCluster().startNode();
    logger.info("Waiting for replicas to be assigned");
    ClusterHealthResponse clusterHealth = waitForGreenCluster("2");
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    final String node2 = getLocalNodeId(server_2);
    // Explicit reroute before re-checking health and allocation.
    clusterAdmin().prepareReroute().get();
    clusterHealth = clusterAdmin().health(new ClusterHealthRequest(new String[] {}).waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true)).actionGet();
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
    assertThat(clusterHealth.getInitializingShards(), equalTo(0));
    assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
    assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
    assertThat(clusterHealth.getActiveShards(), equalTo(22));
    assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
    // Two nodes: primaries on node1, replicas on node2 -> 11 started shards each.
    clusterState = clusterAdmin().prepareState().get().getState();
    assertNodesPresent(clusterState.getRoutingNodes(), node1, node2);
    routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
    assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
    assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
    RoutingNode routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
    assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
    assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
    logger.info("Starting server3");
    String server_3 = internalCluster().startNode();
    logger.info("Waiting for replicas to be assigned");
    clusterHealth = waitForGreenCluster("3");
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    final String node3 = getLocalNodeId(server_3);
    clusterAdmin().prepareReroute().get();
    clusterHealth = waitForGreenCluster("3");
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
    assertThat(clusterHealth.getInitializingShards(), equalTo(0));
    assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
    assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
    assertThat(clusterHealth.getActiveShards(), equalTo(22));
    assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
    // Three nodes: 22 shards rebalance to roughly 7-8 per node.
    clusterState = clusterAdmin().prepareState().get().getState();
    assertNodesPresent(clusterState.getRoutingNodes(), node1, node2, node3);
    routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
    routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
    RoutingNode routingNodeEntry3 = clusterState.getRoutingNodes().node(node3);
    assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
    assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
    assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
    assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
    assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
    assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
    assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(7));
    logger.info("Closing server1");
    internalCluster().stopNode(server_1);
    logger.info("Running Cluster Health");
    clusterHealth = waitForGreenCluster("2");
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    clusterAdmin().prepareReroute().get();
    clusterHealth = waitForGreenCluster("2");
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
    assertThat(clusterHealth.getActiveShards(), equalTo(22));
    assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
    // Back to two nodes: the cluster recovers full redundancy, 11 shards each.
    clusterState = clusterAdmin().prepareState().get().getState();
    assertNodesPresent(clusterState.getRoutingNodes(), node3, node2);
    routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
    routingNodeEntry3 = clusterState.getRoutingNodes().node(node3);
    assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
    assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
    assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
    assertThat(routingNodeEntry3.numberOfShardsWithState(RELOCATING), equalTo(0));
    assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
    logger.info("Deleting index [test]");
    AcknowledgedResponse deleteIndexResponse = indicesAdmin().prepareDelete("test").get();
    assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true));
    // After deletion no shards remain on either surviving node.
    clusterState = clusterAdmin().prepareState().get().getState();
    assertNodesPresent(clusterState.getRoutingNodes(), node3, node2);
    routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
    assertThat(routingNodeEntry2.isEmpty(), equalTo(true));
    routingNodeEntry3 = clusterState.getRoutingNodes().node(node3);
    assertThat(routingNodeEntry3.isEmpty(), equalTo(true));
}

/**
 * Blocks until the cluster is green with exactly {@code nodeCount} nodes and no
 * relocating shards, after pending LANGUID-priority tasks have completed.
 * Extracted to replace five identical request chains in the test above.
 */
private ClusterHealthResponse waitForGreenCluster(String nodeCount) {
    return clusterAdmin().prepareHealth()
        .setWaitForGreenStatus()
        .setWaitForNodes(nodeCount)
        .setWaitForNoRelocatingShards(true)
        .setWaitForEvents(Priority.LANGUID)
        .get();
}
282491.537141elasticsearch
/**
 * Verifies rename-on-restore behaviour: snapshotted indices can be restored under new names
 * via {@code rename_pattern}/{@code rename_replacement}, while renames that produce duplicate
 * target names, invalid index names, or collide with existing aliases are rejected.
 */
public void testRenameOnRestore() throws Exception {
    Client client = client();
    createRepository("test-repo", "fs");
    createIndex("test-idx-1", "test-idx-2", "test-idx-3");
    ensureGreen();
    assertAcked(client.admin().indices().prepareAliases().addAlias("test-idx-1", "alias-1", false).addAlias("test-idx-2", "alias-2", false).addAlias("test-idx-3", "alias-3", false));
    indexRandomDocs("test-idx-1", 100);
    indexRandomDocs("test-idx-2", 100);
    createSnapshot("test-repo", "test-snap", Arrays.asList("test-idx-1", "test-idx-2"));
    logger.info("--> restore indices with different names");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertDocCount("test-idx-1-copy", 100L);
    assertDocCount("test-idx-2-copy", 100L);
    logger.info("--> close just restored indices");
    client.admin().indices().prepareClose("test-idx-1-copy", "test-idx-2-copy").get();
    logger.info("--> and try to restore these indices again");
    // Restoring over the closed copies must succeed (closed indices can be restored into).
    restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertDocCount("test-idx-1-copy", 100L);
    assertDocCount("test-idx-2-copy", 100L);
    logger.info("--> close indices");
    assertAcked(client.admin().indices().prepareClose("test-idx-1", "test-idx-2-copy"));
    logger.info("--> restore indices with different names");
    restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+-2)").setRenameReplacement("$1-copy").setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    logger.info("--> delete indices");
    cluster().wipeIndices("test-idx-1", "test-idx-1-copy", "test-idx-2", "test-idx-2-copy");
    logger.info("--> try renaming indices using the same name");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+)").setRenameReplacement("same-name").setWaitForCompletion(true).get();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ignored) {
        // expected: both source indices would be renamed onto the same target name
    }
    logger.info("--> try renaming indices using the same name");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("test-idx-2").setRenameReplacement("test-idx-1").setWaitForCompletion(true).get();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ignored) {
        // expected: renamed test-idx-2 would collide with restored test-idx-1
    }
    logger.info("--> try renaming indices using invalid index name");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("__WRONG__").setWaitForCompletion(true).get();
        fail("Shouldn't be here");
    } catch (InvalidIndexNameException ignored) {
        // expected: "__WRONG__" is not a valid index name
    }
    logger.info("--> try renaming indices into existing alias name");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("alias-3").setWaitForCompletion(true).get();
        fail("Shouldn't be here");
    } catch (InvalidIndexNameException ignored) {
        // expected: target name clashes with the pre-existing alias "alias-3"
    }
    logger.info("--> try renaming indices into existing alias of itself");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern("test-idx").setRenameReplacement("alias").setWaitForCompletion(true).get();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ignored) {
        // expected: restored index name would equal its own restored alias ("alias-1")
    }
    logger.info("--> try renaming indices into existing alias of another restored index");
    try {
        client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1", "test-idx-2").setRenamePattern("test-idx-1").setRenameReplacement("alias-2").setWaitForCompletion(true).get();
        fail("Shouldn't be here");
    } catch (SnapshotRestoreException ignored) {
        // expected: renamed index clashes with the alias of the other restored index
    }
    logger.info("--> try renaming indices into existing alias of itself, but don't restore aliases ");
    // With setIncludeAliases(false) the alias is not restored, so the rename is allowed.
    restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern("test-idx").setRenameReplacement("alias").setWaitForCompletion(true).setIncludeAliases(false).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
}
283489.4415110elasticsearch
/**
 * End-to-end snapshot/restore workflow: snapshot a subset of indices (optionally while an
 * asynchronous flush is in flight and optionally while the indices are closed), delete some
 * documents, restore, and verify doc counts and that restored shards got fresh history UUIDs.
 */
public void testBasicWorkFlow() throws Exception {
    createRepository("test-repo", "fs");
    createIndexWithRandomDocs("test-idx-1", 100);
    createIndexWithRandomDocs("test-idx-2", 100);
    createIndexWithRandomDocs("test-idx-3", 100);
    // Optionally kick off an asynchronous flush on a random subset of indices so the snapshot
    // races with in-flight flushes.
    ActionFuture<BroadcastResponse> flushResponseFuture = null;
    if (randomBoolean()) {
        ArrayList<String> indicesToFlush = new ArrayList<>();
        for (int i = 1; i < 4; i++) {
            if (randomBoolean()) {
                indicesToFlush.add("test-idx-" + i);
            }
        }
        if (indicesToFlush.isEmpty() == false) {
            logger.info("--> starting asynchronous flush for indices {}", indicesToFlush);
            flushResponseFuture = indicesAdmin().prepareFlush(indicesToFlush.toArray(Strings.EMPTY_ARRAY)).execute();
        }
    }
    final String[] indicesToSnapshot = { "test-idx-*", "-test-idx-3" };
    logger.info("--> capturing history UUIDs");
    final Map<ShardId, String> historyUUIDs = new HashMap<>();
    for (ShardStats shardStats : indicesAdmin().prepareStats(indicesToSnapshot).clear().get().getShards()) {
        String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY);
        ShardId shardId = shardStats.getShardRouting().shardId();
        if (historyUUIDs.containsKey(shardId)) {
            // All copies of a shard must agree on the history UUID before the snapshot.
            assertThat(shardStats.getShardRouting() + " has a different history uuid", historyUUID, equalTo(historyUUIDs.get(shardId)));
        } else {
            historyUUIDs.put(shardId, historyUUID);
        }
    }
    final boolean snapshotClosed = randomBoolean();
    if (snapshotClosed) {
        assertAcked(indicesAdmin().prepareClose(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get());
    }
    createSnapshot("test-repo", "test-snap", Arrays.asList(indicesToSnapshot));
    List<SnapshotInfo> snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(randomFrom("test-snap", "_all", "*", "*-snap", "test*")).get().getSnapshots();
    assertThat(snapshotInfos.size(), equalTo(1));
    SnapshotInfo snapshotInfo = snapshotInfos.get(0);
    assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
    assertThat(snapshotInfo.version(), equalTo(IndexVersion.current()));
    if (snapshotClosed) {
        assertAcked(indicesAdmin().prepareOpen(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get());
    }
    logger.info("--> delete some data");
    for (int i = 0; i < 50; i++) {
        client().prepareDelete("test-idx-1", Integer.toString(i)).get();
    }
    for (int i = 50; i < 100; i++) {
        client().prepareDelete("test-idx-2", Integer.toString(i)).get();
    }
    for (int i = 0; i < 100; i += 2) {
        client().prepareDelete("test-idx-3", Integer.toString(i)).get();
    }
    assertAllSuccessful(refresh());
    assertDocCount("test-idx-1", 50L);
    assertDocCount("test-idx-2", 50L);
    assertDocCount("test-idx-3", 50L);
    logger.info("--> close indices");
    indicesAdmin().prepareClose("test-idx-1", "test-idx-2").get();
    logger.info("--> restore all indices from the snapshot");
    RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    ensureGreen();
    assertDocCount("test-idx-1", 100L);
    assertDocCount("test-idx-2", 100L);
    assertDocCount("test-idx-3", 50L);
    assertNull(indicesAdmin().prepareGetSettings("test-idx-1").get().getSetting("test-idx-1", MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()));
    assertHistoryUUIDsRenewed(indicesToSnapshot, historyUUIDs);
    logger.info("--> delete indices");
    cluster().wipeIndices("test-idx-1", "test-idx-2");
    logger.info("--> restore one index after deletion");
    restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").get();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    ensureGreen();
    assertDocCount("test-idx-1", 100);
    ClusterState clusterState = clusterAdmin().prepareState().get().getState();
    assertThat(clusterState.getMetadata().hasIndex("test-idx-1"), equalTo(true));
    assertThat(clusterState.getMetadata().hasIndex("test-idx-2"), equalTo(false));
    assertNull(indicesAdmin().prepareGetSettings("test-idx-1").get().getSetting("test-idx-1", MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()));
    assertHistoryUUIDsRenewed(indicesToSnapshot, historyUUIDs);
    if (flushResponseFuture != null) {
        flushResponseFuture.actionGet();
    }
}

/**
 * Asserts that every shard of {@code indices} now carries a non-null history UUID that differs
 * from the one captured in {@code previousHistoryUUIDs}, i.e. the restore created a new history.
 */
private void assertHistoryUUIDsRenewed(String[] indices, Map<ShardId, String> previousHistoryUUIDs) {
    for (ShardStats shardStats : indicesAdmin().prepareStats(indices).clear().get().getShards()) {
        String historyUUID = shardStats.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY);
        ShardId shardId = shardStats.getShardRouting().shardId();
        assertThat(shardStats.getShardRouting() + " doesn't have a history uuid", historyUUID, notNullValue());
        assertThat(shardStats.getShardRouting() + " doesn't have a new history", historyUUID, not(equalTo(previousHistoryUUIDs.get(shardId))));
    }
}
282220.8327108elasticsearch
 /**
  * Builds the REST response body for a get-aliases request.
  *
  * Renders index aliases and data stream aliases into {@code builder}. When explicitly
  * requested alias names are not present in the result, the response status is 404 and an
  * "error"/"status" pair is included; otherwise the status is 200.
  *
  * @param aliasesExplicitlyRequested whether the caller named specific aliases (vs. listing all)
  * @param requestedAliases the requested alias names/patterns; entries starting with '-' after
  *                         the first wildcard act as exclusions
  * @param responseAliasMap index name to the alias metadata found for it
  * @param dataStreamAliases data stream name to its aliases
  * @param builder the XContent builder the response is written into
  */
 static RestResponse buildRestResponse(boolean aliasesExplicitlyRequested, String[] requestedAliases, Map<String, List<AliasMetadata>> responseAliasMap, Map<String, List<DataStreamAlias>> dataStreamAliases, XContentBuilder builder) throws Exception {
    // When aliases were explicitly requested, only indices that actually carry one of the
    // matching aliases are rendered in the body.
    final Set<String> indicesToDisplay = new HashSet<>();
    // Every alias name present in the response (index aliases plus data stream aliases);
    // used below to decide which requested names are "missing".
    final Set<String> returnedAliasNames = new HashSet<>();
    for (final Map.Entry<String, List<AliasMetadata>> cursor : responseAliasMap.entrySet()) {
        for (final AliasMetadata aliasMetadata : cursor.getValue()) {
            if (aliasesExplicitlyRequested) {
                indicesToDisplay.add(cursor.getKey());
            }
            returnedAliasNames.add(aliasMetadata.alias());
        }
    }
    dataStreamAliases.entrySet().stream().flatMap(entry -> entry.getValue().stream()).forEach(dataStreamAlias -> returnedAliasNames.add(dataStreamAlias.getName()));
    final SortedSet<String> missingAliases = new TreeSet<>();
    // Position of the first wildcard pattern in the request; '-' entries at or after it are
    // treated as exclusions rather than literal alias names.
    int firstWildcardIndex = requestedAliases.length;
    for (int i = 0; i < requestedAliases.length; i++) {
        if (Regex.isSimpleMatchPattern(requestedAliases[i])) {
            firstWildcardIndex = i;
            break;
        }
    }
    // Collect concrete requested aliases that were not returned. Skip "_all", patterns, and
    // exclusion entries themselves; also skip names cancelled by a later '-' exclusion.
    for (int i = 0; i < requestedAliases.length; i++) {
        if (Metadata.ALL.equals(requestedAliases[i]) || Regex.isSimpleMatchPattern(requestedAliases[i]) || (i > firstWildcardIndex && requestedAliases[i].charAt(0) == '-')) {
            continue;
        }
        // Scan subsequent entries (starting no earlier than the first wildcard) for an
        // exclusion that matches this name; j == length means no exclusion applied.
        int j = Math.max(i + 1, firstWildcardIndex);
        for (; j < requestedAliases.length; j++) {
            if (requestedAliases[j].charAt(0) == '-') {
                if (Regex.simpleMatch(requestedAliases[j].substring(1), requestedAliases[i]) || Metadata.ALL.equals(requestedAliases[j].substring(1))) {
                    break;
                }
            }
        }
        if (j == requestedAliases.length) {
            if (false == returnedAliasNames.contains(requestedAliases[i])) {
                missingAliases.add(requestedAliases[i]);
            }
        }
    }
    final RestStatus status;
    builder.startObject();
    {
        if (missingAliases.isEmpty()) {
            status = RestStatus.OK;
        } else {
            // Some explicitly requested aliases do not exist: report 404 with a message that
            // uses singular/plural phrasing depending on how many are missing.
            status = RestStatus.NOT_FOUND;
            final String message;
            if (missingAliases.size() == 1) {
                message = String.format(Locale.ROOT, "alias [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases));
            } else {
                message = String.format(Locale.ROOT, "aliases [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases));
            }
            builder.field("error", message);
            builder.field("status", status.getStatus());
        }
        // Render per-index alias objects; when aliases were explicitly requested, restrict the
        // output to indices that matched.
        for (final var entry : responseAliasMap.entrySet()) {
            if (aliasesExplicitlyRequested == false || indicesToDisplay.contains(entry.getKey())) {
                builder.startObject(entry.getKey());
                {
                    builder.startObject("aliases");
                    {
                        for (final AliasMetadata alias : entry.getValue()) {
                            AliasMetadata.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS);
                        }
                    }
                    builder.endObject();
                }
                builder.endObject();
            }
        }
        // Render per-data-stream alias objects, including write-index and filter details when set.
        for (var entry : dataStreamAliases.entrySet()) {
            builder.startObject(entry.getKey());
            {
                builder.startObject("aliases");
                {
                    for (DataStreamAlias alias : entry.getValue()) {
                        builder.startObject(alias.getName());
                        if (entry.getKey().equals(alias.getWriteDataStream())) {
                            builder.field("is_write_index", true);
                        }
                        if (alias.getFilter(entry.getKey()) != null) {
                            builder.field("filter", XContentHelper.convertToMap(alias.getFilter(entry.getKey()).uncompressed(), true).v2());
                        }
                        builder.endObject();
                    }
                }
                builder.endObject();
            }
            builder.endObject();
        }
    }
    builder.endObject();
    return new RestResponse(status, builder);
}
282699.323140elasticsearch
/**
 * Runs a query-then-fetch search across one shard on a current-version node and one shard on
 * an older-version node, with the request's minimum compatible version set to the old version,
 * and verifies that both shards execute and their results reduce to two total hits.
 */
public void testMinimumVersionSameAsOldVersion() throws Exception {
    Version newVersion = Version.CURRENT;
    Version oldVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), VersionUtils.getPreviousVersion(newVersion));
    Version minVersion = oldVersion;
    final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime);
    AtomicInteger successfulOps = new AtomicInteger();
    Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>();
    DiscoveryNode newVersionNode = DiscoveryNodeUtils.builder("node1").version(newVersion).build();
    DiscoveryNode oldVersionNode = DiscoveryNodeUtils.builder("node2").version(oldVersion).build();
    lookup.put("node1", new SearchAsyncActionTests.MockConnection(newVersionNode));
    lookup.put("node2", new SearchAsyncActionTests.MockConnection(oldVersionNode));
    OriginalIndices idx = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS);
    // Shard 0 lives on the new-version node, shard 1 on the old-version node.
    ArrayList<SearchShardIterator> list = new ArrayList<>();
    ShardRouting routingNewVersionShard = ShardRouting.newUnassigned(new ShardId(new Index("idx", "_na_"), 0), true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), ShardRouting.Role.DEFAULT);
    routingNewVersionShard = routingNewVersionShard.initialize(newVersionNode.getId(), "p0", 0);
    routingNewVersionShard.started();
    list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 0), singletonList(routingNewVersionShard), idx));
    ShardRouting routingOldVersionShard = ShardRouting.newUnassigned(new ShardId(new Index("idx", "_na_"), 1), true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foobar"), ShardRouting.Role.DEFAULT);
    routingOldVersionShard = routingOldVersionShard.initialize(oldVersionNode.getId(), "p1", 0);
    routingOldVersionShard.started();
    list.add(new SearchShardIterator(null, new ShardId(new Index("idx", "_na_"), 1), singletonList(routingOldVersionShard), idx));
    GroupShardsIterator<SearchShardIterator> shardsIter = new GroupShardsIterator<>(list);
    final SearchRequest searchRequest = new SearchRequest(minVersion);
    searchRequest.allowPartialSearchResults(false);
    searchRequest.source(new SearchSourceBuilder().size(1).sort(SortBuilders.fieldSort("timestamp")));
    SearchTransportService searchTransportService = new SearchTransportService(null, null, null) {

        @Override
        public void sendExecuteQuery(Transport.Connection connection, ShardSearchRequest request, SearchTask task, SearchActionListener<? super SearchPhaseResult> listener) {
            int shardId = request.shardId().id();
            QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("N/A", 123), new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null), null);
            SortField sortField = new SortField("timestamp", SortField.Type.LONG);
            // Both known shards (0 and 1) return one sorted hit each; the two original
            // branches were byte-identical, so they are merged into a single condition.
            if (shardId == 0 || shardId == 1) {
                queryResult.topDocs(new TopDocsAndMaxScore(new TopFieldDocs(new TotalHits(1, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), new FieldDoc[] { new FieldDoc(randomInt(1000), Float.NaN, new Object[] { shardId }) }, new SortField[] { sortField }), Float.NaN), new DocValueFormat[] { DocValueFormat.RAW });
            }
            queryResult.from(0);
            queryResult.size(1);
            successfulOps.incrementAndGet();
            // Respond on a separate thread to mimic asynchronous transport delivery.
            new Thread(() -> listener.onResponse(queryResult)).start();
        }
    };
    SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder());
    SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap());
    QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer(searchRequest, EsExecutors.DIRECT_EXECUTOR_SERVICE, new NoopCircuitBreaker(CircuitBreaker.REQUEST), controller, task::isCancelled, task.getProgressListener(), shardsIter.size(), exc -> {
    });
    CountDownLatch latch = new CountDownLatch(1);
    SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction(logger, null, searchTransportService, (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", AliasFilter.EMPTY), Collections.emptyMap(), EsExecutors.DIRECT_EXECUTOR_SERVICE, resultConsumer, searchRequest, null, shardsIter, timeProvider, new ClusterState.Builder(new ClusterName("test")).build(), task, SearchResponse.Clusters.EMPTY) {

        @Override
        protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
            // Replace the fetch phase with a latch release so the test can synchronize on
            // completion of the query phase.
            return new SearchPhase("test") {

                @Override
                public void run() {
                    latch.countDown();
                }
            };
        }
    };
    action.start();
    latch.await();
    assertThat(successfulOps.get(), equalTo(2));
    SearchPhaseController.ReducedQueryPhase phase = action.results.reduce();
    assertThat(phase.numReducePhases(), greaterThanOrEqualTo(1));
    assertThat(phase.totalHits().value, equalTo(2L));
    assertThat(phase.totalHits().relation, equalTo(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO));
}
283491.210126elasticsearch
/**
 * Verifies that rebalancing starts only after the rebalance.enable setting is relaxed from NONE
 * to PRIMARIES/REPLICAS/ALL (applied either as a cluster setting or an index setting), and that
 * only shards of the allowed kind — and only from the "test" index — relocate.
 */
public void testEnableClusterBalance() {
    final boolean useClusterSetting = randomBoolean();
    final Rebalance allowedOnes = RandomPicks.randomFrom(random(), EnumSet.of(Rebalance.PRIMARIES, Rebalance.REPLICAS, Rebalance.ALL));
    Settings build = Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE : RandomPicks.randomFrom(random(), Rebalance.values())).put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3).put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), 10).build();
    ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    AllocationService strategy = createAllocationService(build, clusterSettings);
    Settings indexSettings = useClusterSetting ? Settings.EMPTY : Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE).build();
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current()).put(indexSettings)).numberOfShards(3).numberOfReplicas(1)).put(IndexMetadata.builder("always_disabled").settings(settings(IndexVersion.current()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).addAsNew(metadata.index("always_disabled")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    logger.info("--> adding one nodes and do rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(4));
    logger.info("--> start the shards (primaries)");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(4));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(4));
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(8));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(0));
    logger.info("--> adding one nodes and do rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    // With rebalancing still disabled for "test", adding node3 must not trigger relocations.
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(8));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), RELOCATING).size(), equalTo(0));
    // Relax the rebalance setting to the randomly chosen allowed kind, either on the cluster
    // or on the "test" index, matching how it was initially disabled.
    if (useClusterSetting) {
        clusterState = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).transientSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes).build())).build();
    } else {
        IndexMetadata meta = clusterState.getMetadata().index("test");
        IndexMetadata meta1 = clusterState.getMetadata().index("always_disabled");
        clusterState = ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).removeAllIndices().put(IndexMetadata.builder(meta1)).put(IndexMetadata.builder(meta).settings(Settings.builder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes).build()))).build();
    }
    clusterSettings.applySettings(clusterState.metadata().settings());
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(6));
    assertThat("expected 2 shards to relocate useClusterSettings: " + useClusterSetting, shardsWithState(clusterState.getRoutingNodes(), RELOCATING).size(), equalTo(2));
    List<ShardRouting> mutableShardRoutings = shardsWithState(clusterState.getRoutingNodes(), RELOCATING);
    // "always_disabled" forbids rebalancing in every case, so every relocating shard must
    // belong to "test" regardless of the allowed kind (hoisted out of the switch below).
    for (ShardRouting routing : mutableShardRoutings) {
        assertThat("only test index can rebalance", routing.getIndexName(), equalTo("test"));
    }
    switch(allowedOnes) {
        case PRIMARIES:
            for (ShardRouting routing : mutableShardRoutings) {
                assertTrue("only primaries are allowed to relocate", routing.primary());
            }
            break;
        case REPLICAS:
            for (ShardRouting routing : mutableShardRoutings) {
                assertFalse("only replicas are allowed to relocate", routing.primary());
            }
            break;
        case ALL:
            // Both primaries and replicas may relocate; the index check above suffices.
            break;
        default:
            fail("only replicas, primaries or all are allowed");
    }
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(8));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(0));
}
283471.951140elasticsearch
/**
 * Verifies the debug explanation messages produced by NodeVersionAllocationDecider for primary
 * relocation, snapshot restore, and replica allocation between a newer- and an older-version node.
 */
public void testMessages() {
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).build();
    // One node on the current version and one on the previous version/index version.
    RoutingNode newNode = RoutingNodesHelper.routingNode("newNode", newNode("newNode", Version.CURRENT, IndexVersion.current()));
    RoutingNode oldNode = RoutingNodesHelper.routingNode("oldNode", newNode("oldNode", VersionUtils.getPreviousVersion(), IndexVersionUtils.getPreviousVersion()));
    final ClusterName clusterName = ClusterName.DEFAULT;
    ClusterState clusterState = ClusterState.builder(clusterName).metadata(metadata).routingTable(initialRoutingTable).nodes(DiscoveryNodes.builder().add(newNode.node()).add(oldNode.node())).build();
    final ShardId shardId = clusterState.routingTable().index("test").shard(0).shardId();
    final ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard();
    final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().get(0);
    RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState, null, null, 0);
    // Enable debug mode so decisions carry human-readable explanations.
    routingAllocation.debugDecision(true);
    final NodeVersionAllocationDecider allocationDecider = new NodeVersionAllocationDecider();
    // A brand-new (unassigned) primary can go anywhere.
    Decision decision = allocationDecider.canAllocate(primaryShard, newNode, routingAllocation);
    assertThat(decision.type(), is(Decision.Type.YES));
    assertThat(decision.getExplanation(), is("the primary shard is new or already existed on the node"));
    // Primary relocation old -> new node is allowed; new -> old node is not.
    decision = allocationDecider.canAllocate(ShardRoutingHelper.initialize(primaryShard, "oldNode"), newNode, routingAllocation);
    assertThat(decision.type(), is(Decision.Type.YES));
    assertThat(decision.getExplanation(), is("can relocate primary shard from a node with version [" + oldNode.node().getVersion() + "] to a node with equal-or-newer version [" + newNode.node().getVersion() + "]"));
    decision = allocationDecider.canAllocate(ShardRoutingHelper.initialize(primaryShard, "newNode"), oldNode, routingAllocation);
    assertThat(decision.type(), is(Decision.Type.NO));
    assertThat(decision.getExplanation(), is("cannot relocate primary shard from a node with version [" + newNode.node().getVersion() + "] to a node with older version [" + oldNode.node().getVersion() + "]"));
    // Snapshot restore: a node can only host a shard whose snapshot version it supports.
    final IndexId indexId = new IndexId("test", UUIDs.randomBase64UUID(random()));
    final SnapshotRecoverySource newVersionSnapshot = new SnapshotRecoverySource(UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), newNode.node().getMaxIndexVersion(), indexId);
    final SnapshotRecoverySource oldVersionSnapshot = new SnapshotRecoverySource(UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), oldNode.node().getMaxIndexVersion(), indexId);
    decision = allocationDecider.canAllocate(ShardRoutingHelper.newWithRestoreSource(primaryShard, newVersionSnapshot), oldNode, routingAllocation);
    assertThat(decision.type(), is(Decision.Type.NO));
    assertThat(decision.getExplanation(), is("max supported index version [" + oldNode.node().getMaxIndexVersion().toReleaseVersion() + "] is older than the snapshot version [" + newNode.node().getMaxIndexVersion().toReleaseVersion() + "]"));
    decision = allocationDecider.canAllocate(ShardRoutingHelper.newWithRestoreSource(primaryShard, oldVersionSnapshot), newNode, routingAllocation);
    assertThat(decision.type(), is(Decision.Type.YES));
    assertThat(decision.getExplanation(), is("max supported index version [" + newNode.node().getMaxIndexVersion().toReleaseVersion() + "] is the same or newer than snapshot version [" + oldNode.node().getMaxIndexVersion().toReleaseVersion() + "]"));
    final RoutingChangesObserver routingChangesObserver = new RoutingChangesObserver() {
    };
    // Start the primary on the newer node so replica allocation decisions compare against it:
    // a replica must not land on a node older than the primary's node.
    final RoutingNodes routingNodes = clusterState.mutableRoutingNodes();
    final ShardRouting startedPrimary = routingNodes.startShard(logger, routingNodes.initializeShard(primaryShard, "newNode", null, 0, routingChangesObserver), routingChangesObserver, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
    routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, null, 0);
    routingAllocation.debugDecision(true);
    decision = allocationDecider.canAllocate(replicaShard, oldNode, routingAllocation);
    assertThat(decision.type(), is(Decision.Type.NO));
    assertThat(decision.getExplanation(), is("cannot allocate replica shard to a node with version [" + oldNode.node().getVersion() + "] since this is older than the primary version [" + newNode.node().getVersion() + "]"));
    // After moving the primary to the older node, the newer node becomes a valid replica target.
    routingNodes.startShard(logger, routingNodes.relocateShard(startedPrimary, "oldNode", 0, routingChangesObserver).v2(), routingChangesObserver, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
    routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, null, 0);
    routingAllocation.debugDecision(true);
    decision = allocationDecider.canAllocate(replicaShard, newNode, routingAllocation);
    assertThat(decision.type(), is(Decision.Type.YES));
    assertThat(decision.getExplanation(), is("can allocate replica shard to a node with version [" + newNode.node().getVersion() + "] since this is equal-or-newer than the primary version [" + oldNode.node().getVersion() + "]"));
}
286049.895113elasticsearch
/**
 * Exercises the allocation lifecycle of a single index (10 shards, 1 replica each):
 * primaries initialize and start on the first node before any replica is assigned,
 * replicas then initialize and start on a second node, and adding a third node
 * triggers a rebalance that moves shards onto it.
 */
public void testSingleIndexFirstStartPrimaryThenBackups() {
    // Balance weights favor replica-count only, so the per-node shard counts
    // after the final rebalance are deterministic (7 / 7 / 6).
    AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put("cluster.routing.allocation.node_initial_primaries_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).put("cluster.routing.allocation.balance.index", 0.0f).put("cluster.routing.allocation.balance.replica", 1.0f).put("cluster.routing.allocation.balance.primary", 0.0f).build());
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(10).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    assertThat(clusterState.routingTable().index("test").size(), equalTo(10));
    // No nodes yet: both copies of every shard are unassigned.
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(1).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(0).currentNodeId(), nullValue());
        assertThat(clusterState.routingTable().index("test").shard(i).shard(1).currentNodeId(), nullValue());
    }
    logger.info("Adding one node and performing rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
    ClusterState newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.routingTable().index("test").size(), equalTo(10));
    // All primaries initialize on the single node; replicas stay unassigned.
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
    }
    logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    logger.info("Start the primary shard (on node1)");
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node1"));
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.routingTable().index("test").size(), equalTo(10));
    // Once primaries are started, replicas begin initializing on node2.
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
    }
    logger.info("Reroute, nothing should change");
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    logger.info("Start the backup shard");
    routingNodes = clusterState.getRoutingNodes();
    newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node2"));
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    routingNodes = clusterState.getRoutingNodes();
    assertThat(clusterState.routingTable().index("test").size(), equalTo(10));
    // Everything started: primaries on node1, replicas on node2.
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
    }
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(10));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10));
    logger.info("Add another node and perform rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    routingNodes = clusterState.getRoutingNodes();
    assertThat(clusterState.routingTable().index("test").size(), equalTo(10));
    // Rebalancing begins: each old node keeps 10 shards counting relocations,
    // and node3 receives 6 initializing shard copies.
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(10));
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED) + routingNodes.node("node1").numberOfShardsWithState(RELOCATING), equalTo(10));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(10));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED) + routingNodes.node("node2").numberOfShardsWithState(RELOCATING), equalTo(10));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(6));
    logger.info("Start the shards on node 3");
    routingNodes = clusterState.getRoutingNodes();
    newState = startInitializingShardsAndReroute(strategy, clusterState, routingNodes.node("node3"));
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    routingNodes = clusterState.getRoutingNodes();
    assertThat(clusterState.routingTable().index("test").size(), equalTo(10));
    // Final balanced distribution across the three nodes.
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(7));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(7));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(6));
}
286473.981114elasticsearch
/**
 * Verifies that the replica count of an index can be changed at runtime:
 * increasing it adds an unassigned replica that is allocated and started on a
 * newly added node, and decreasing it removes a replica without disturbing the
 * primary or the surviving replica.
 */
public void testUpdateNumberOfReplicas() {
    AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).build());
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    // One shard with two copies (primary + replica), both unassigned initially.
    assertThat(initialRoutingTable.index("test").size(), equalTo(1));
    assertThat(initialRoutingTable.index("test").shard(0).size(), equalTo(2));
    assertThat(initialRoutingTable.index("test").shard(0).shard(0).state(), equalTo(UNASSIGNED));
    assertThat(initialRoutingTable.index("test").shard(0).shard(1).state(), equalTo(UNASSIGNED));
    assertThat(initialRoutingTable.index("test").shard(0).shard(0).currentNodeId(), nullValue());
    assertThat(initialRoutingTable.index("test").shard(0).shard(1).currentNodeId(), nullValue());
    logger.info("Adding two nodes and performing rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    logger.info("Start all the primary shards");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState);
    logger.info("Start all the replica shards");
    ClusterState newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    final String nodeHoldingPrimary = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
    final String nodeHoldingReplica = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
    // Primary and replica must never share a node.
    assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
    assertThat(clusterState.routingTable().index("test").size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
    logger.info("add another replica");
    // Bump the replica count to 2 in both the routing table and the metadata;
    // the new replica shows up as a third, unassigned copy.
    final String[] indices = { "test" };
    RoutingTable updatedRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState.routingTable()).updateNumberOfReplicas(2, indices).build();
    metadata = Metadata.builder(clusterState.metadata()).updateNumberOfReplicas(2, indices).build();
    clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metadata(metadata).build();
    assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
    logger.info("Add another node and start the added replica");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.routingTable().index("test").size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
    newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.routingTable().index("test").size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(3));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
    logger.info("now remove a replica");
    // Shrink back to one replica; the primary stays put and exactly one replica
    // survives on one of the two replica-holding nodes.
    updatedRoutingTable = RoutingTable.builder(clusterState.routingTable()).updateNumberOfReplicas(1, indices).build();
    metadata = Metadata.builder(clusterState.metadata()).updateNumberOfReplicas(1, indices).build();
    clusterState = ClusterState.builder(clusterState).routingTable(updatedRoutingTable).metadata(metadata).build();
    assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).size(), equalTo(2));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().size(), equalTo(1));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
    assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
    logger.info("do a reroute, should remain the same");
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
}
283642.261128elasticsearch
/**
 * Tests the {@code epoch_millis} date formatter: parsing integral and
 * fractional epoch-millisecond strings (including negative values, where the
 * fractional part is carried into the previous second), formatting an
 * {@link Instant} back to a string, and round-tripping parse → format → parse.
 */
public void testEpochMillisParser() {
    DateFormatter formatter = DateFormatters.forPattern("epoch_millis");
    assertEpochMillis(formatter, "12345", 12L, 345_000_000, "12345");
    assertEpochMillis(formatter, "0", 0L, 0, "0");
    assertEpochMillis(formatter, "0.1", 0L, 100_000, "0.1");
    assertEpochMillis(formatter, "123.123456", 0L, 123123456, "123.123456");
    // Negative fractional inputs borrow from the next-lower second, so the
    // nanos are the complement of the fraction.
    assertEpochMillis(formatter, "-123.123456", -1L, 876876544, "-123.123456");
    assertEpochMillis(formatter, "-6789123.123456", -6790L, 876876544, "-6789123.123456");
    assertEpochMillis(formatter, "6789123.123456", 6789L, 123123456, "6789123.123456");
    assertEpochMillis(formatter, "-6250000430768.25", -6250000431L, 231750000, "-6250000430768.25");
    assertEpochMillis(formatter, "-6250000430768.75", -6250000431L, 231250000, "-6250000430768.75");
    // All-zero fractions are dropped when formatting back.
    assertEpochMillis(formatter, "-6250000430768.00", -6250000431L, 232000000, "-6250000430768");
    // Trailing zeros within a non-zero fraction are trimmed on format.
    assertEpochMillis(formatter, "-6250000431000.250000", -6250000432L, 999750000, "-6250000431000.25");
    assertEpochMillis(formatter, "-6250000431000.000001", -6250000432L, 999999999, "-6250000431000.000001");
    assertEpochMillis(formatter, "-6250000431000.75", -6250000432L, 999250000, "-6250000431000.75");
    assertEpochMillis(formatter, "-6250000431000.00", -6250000431L, 0, "-6250000431000");
    assertEpochMillis(formatter, "-6250000431000", -6250000431L, 0, "-6250000431000");
    assertEpochMillis(formatter, "-6250000430768", -6250000431L, 232000000, "-6250000430768");
    assertEpochMillis(formatter, "1680000430768", 1680000430L, 768000000, "1680000430768");
    assertEpochMillis(formatter, "-0.12345", -1L, 999876550, "-0.12345");
}

/**
 * Parses {@code input} with {@code formatter} and asserts the resulting
 * instant's epoch second and nanosecond components, the formatted string
 * representation, and that formatting then re-parsing yields the identical
 * instant.
 *
 * @param formatter         the {@code epoch_millis} formatter under test
 * @param input             the epoch-millisecond string to parse
 * @param expectedSeconds   expected {@link Instant#getEpochSecond()}
 * @param expectedNanos     expected {@link Instant#getNano()}
 * @param expectedFormatted expected output of formatting the parsed instant
 */
private void assertEpochMillis(DateFormatter formatter, String input, long expectedSeconds, int expectedNanos, String expectedFormatted) {
    Instant instant = Instant.from(formatter.parse(input));
    assertThat(instant.getEpochSecond(), is(expectedSeconds));
    assertThat(instant.getNano(), is(expectedNanos));
    assertThat(formatter.format(instant), is(expectedFormatted));
    assertThat(Instant.from(formatter.parse(formatter.format(instant))), is(instant));
}
283336.98127elasticsearch
/**
 * Verifies cluster-state persistence on a data-only (non-master) node, whose
 * persisted state is an {@code AsyncPersistedState}: term and accepted-state
 * updates are written asynchronously, the on-disk voting configurations are
 * replaced by the stale-state placeholder configuration, committing marks the
 * cluster UUID as committed, and a reload after close reproduces the last
 * term and (voting-config-reset) accepted state.
 */
public void testDataOnlyNodePersistence() throws Exception {
    // Resources to close at the end; the gateway is removed from this list
    // once it has been closed explicitly mid-test.
    final List<Closeable> cleanup = new ArrayList<>(2);
    try {
        // A node with only the data role and non-master settings, so the
        // gateway takes the data-only (async) persistence path.
        DiscoveryNode localNode = DiscoveryNodeUtils.builder("node1").roles(Sets.newHashSet(DiscoveryNodeRole.DATA_ROLE)).build();
        Settings settings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterName.value()).put(nonMasterNode()).put(Node.NODE_NAME_SETTING.getKey(), "test").build();
        final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode);
        cleanup.add(gateway);
        final TransportService transportService = mock(TransportService.class);
        TestThreadPool threadPool = new TestThreadPool("testMarkAcceptedConfigAsCommittedOnDataOnlyNode");
        cleanup.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
        when(transportService.getThreadPool()).thenReturn(threadPool);
        ClusterService clusterService = mock(ClusterService.class);
        when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
        final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService(nodeEnvironment, xContentRegistry(), new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L);
        gateway.start(settings, transportService, clusterService, new MetaStateService(nodeEnvironment, xContentRegistry()), null, null, persistedClusterStateService, List.of(), CompatibilityVersionsUtils.staticCurrent());
        final CoordinationState.PersistedState persistedState = gateway.getPersistedState();
        assertThat(persistedState, instanceOf(GatewayMetaState.AsyncPersistedState.class));
        // Loop until accepted and committed configurations differ, so that a
        // later markLastAcceptedStateAsCommitted() is observable.
        CoordinationMetadata coordinationMetadata;
        do {
            coordinationMetadata = createCoordinationMetadata(randomNonNegativeLong());
        } while (coordinationMetadata.getLastAcceptedConfiguration().equals(coordinationMetadata.getLastCommittedConfiguration()));
        ClusterState state = createClusterState(randomNonNegativeLong(), Metadata.builder().coordinationMetadata(coordinationMetadata).clusterUUID(randomAlphaOfLength(10)).build());
        persistedState.setCurrentTerm(state.term());
        persistedState.setLastAcceptedState(state);
        // Writes are asynchronous: wait for all pending state writes to land.
        assertBusy(() -> assertTrue(gateway.allPendingAsyncStatesWritten()), 30, TimeUnit.SECONDS);
        assertThat(persistedState.getLastAcceptedState().getLastAcceptedConfiguration(), not(equalTo(persistedState.getLastAcceptedState().getLastCommittedConfiguration())));
        // On disk, a data-only node stores the stale-state placeholder voting
        // configurations rather than the real ones.
        CoordinationMetadata persistedCoordinationMetadata = persistedClusterStateService.loadBestOnDiskState(false).metadata.coordinationMetadata();
        assertThat(persistedCoordinationMetadata.getLastAcceptedConfiguration(), equalTo(GatewayMetaState.AsyncPersistedState.staleStateConfiguration));
        assertThat(persistedCoordinationMetadata.getLastCommittedConfiguration(), equalTo(GatewayMetaState.AsyncPersistedState.staleStateConfiguration));
        persistedState.markLastAcceptedStateAsCommitted();
        assertBusy(() -> assertTrue(gateway.allPendingAsyncStatesWritten()), 30, TimeUnit.SECONDS);
        // Committing copies the accepted configuration into the committed one
        // and marks the cluster UUID as committed, in the in-memory state.
        CoordinationMetadata expectedCoordinationMetadata = CoordinationMetadata.builder(coordinationMetadata).lastCommittedConfiguration(coordinationMetadata.getLastAcceptedConfiguration()).build();
        ClusterState expectedClusterState = ClusterState.builder(state).metadata(Metadata.builder().coordinationMetadata(expectedCoordinationMetadata).clusterUUID(state.metadata().clusterUUID()).clusterUUIDCommitted(true).build()).build();
        assertClusterStateEqual(expectedClusterState, persistedState.getLastAcceptedState());
        // On disk the placeholder configurations remain, but the committed
        // cluster UUID flag is persisted.
        persistedCoordinationMetadata = persistedClusterStateService.loadBestOnDiskState(false).metadata.coordinationMetadata();
        assertThat(persistedCoordinationMetadata.getLastAcceptedConfiguration(), equalTo(GatewayMetaState.AsyncPersistedState.staleStateConfiguration));
        assertThat(persistedCoordinationMetadata.getLastCommittedConfiguration(), equalTo(GatewayMetaState.AsyncPersistedState.staleStateConfiguration));
        assertTrue(persistedClusterStateService.loadBestOnDiskState(false).metadata.clusterUUIDCommitted());
        // Randomly interleave term bumps and accepted-state writes; the final
        // iteration forces a state write so at least one is persisted.
        final String indexName = randomAlphaOfLength(10);
        long currentTerm = state.term();
        boolean wroteState = false;
        final int iterations = randomIntBetween(1, 1000);
        for (int i = 0; i < iterations; i++) {
            final boolean mustWriteState = wroteState == false && i == iterations - 1;
            if (rarely() && mustWriteState == false) {
                currentTerm = currentTerm + (rarely() ? randomIntBetween(1, 5) : 0L);
                persistedState.setCurrentTerm(currentTerm);
            } else {
                final int numberOfShards = randomIntBetween(1, 5);
                // The accepted state's term may never exceed the current term.
                final long term = Math.min(state.term() + (rarely() ? randomIntBetween(1, 5) : 0L), currentTerm);
                final IndexMetadata indexMetadata = createIndexMetadata(indexName, numberOfShards, i);
                state = createClusterState(state.version() + 1, Metadata.builder().coordinationMetadata(createCoordinationMetadata(term)).put(indexMetadata, false).build());
                persistedState.setLastAcceptedState(state);
                wroteState = true;
            }
        }
        assertTrue(wroteState);
        assertEquals(currentTerm, persistedState.getCurrentTerm());
        assertClusterStateEqual(state, persistedState.getLastAcceptedState());
        assertBusy(() -> assertTrue(gateway.allPendingAsyncStatesWritten()), 30, TimeUnit.SECONDS);
        // Close the gateway explicitly (and drop it from cleanup) so the
        // persisted state can be re-opened and verified against what was written.
        gateway.close();
        assertTrue(cleanup.remove(gateway));
        try (CoordinationState.PersistedState reloadedPersistedState = newGatewayPersistedState()) {
            assertEquals(currentTerm, reloadedPersistedState.getCurrentTerm());
            // The reloaded state matches the last written one with its voting
            // configurations reset, and still contains the test index.
            assertClusterStateEqual(GatewayMetaState.AsyncPersistedState.resetVotingConfiguration(state), reloadedPersistedState.getLastAcceptedState());
            assertNotNull(reloadedPersistedState.getLastAcceptedState().metadata().index(indexName));
        }
    } finally {
        IOUtils.close(cleanup);
    }
}
283895.086124elasticsearch
/**
 * Verifies that {@code HealthIndicatorResult#toXContentChunked} renders the expected JSON for
 * every section of a result: status, symptom, details, impacts, and diagnosis entries, as seen
 * after round-tripping the rendered bytes back into a map via {@code XContentHelper.convertToMap}.
 */
public void testToXContent() throws Exception {
    String name = randomAlphaOfLength(10);
    HealthStatus status = randomFrom(HealthStatus.RED, HealthStatus.YELLOW, HealthStatus.GREEN);
    String symptom = randomAlphaOfLength(20);
    Map<String, Object> detailsMap = new HashMap<>();
    detailsMap.put("key", "value");
    HealthIndicatorDetails details = new SimpleHealthIndicatorDetails(detailsMap);
    List<HealthIndicatorImpact> impacts = new ArrayList<>();
    String impact1Id = randomAlphaOfLength(30);
    int impact1Severity = randomIntBetween(1, 5);
    String impact1Description = randomAlphaOfLength(30);
    ImpactArea firstImpactArea = randomFrom(ImpactArea.values());
    impacts.add(new HealthIndicatorImpact(name, impact1Id, impact1Severity, impact1Description, List.of(firstImpactArea)));
    String impact2Id = randomAlphaOfLength(30);
    int impact2Severity = randomIntBetween(1, 5);
    String impact2Description = randomAlphaOfLength(30);
    ImpactArea secondImpactArea = randomFrom(ImpactArea.values());
    impacts.add(new HealthIndicatorImpact(name, impact2Id, impact2Severity, impact2Description, List.of(secondImpactArea)));
    List<Diagnosis> diagnosisList = new ArrayList<>();
    Diagnosis.Resource resource1 = new Diagnosis.Resource(Diagnosis.Resource.Type.INDEX, List.of(randomAlphaOfLength(10)));
    Diagnosis diagnosis1 = new Diagnosis(new Diagnosis.Definition(name, randomAlphaOfLength(30), randomAlphaOfLength(50), randomAlphaOfLength(50), randomAlphaOfLength(30)), List.of(resource1));
    diagnosisList.add(diagnosis1);
    Diagnosis.Resource resource2 = new Diagnosis.Resource(Diagnosis.Resource.Type.INDEX, List.of(randomAlphaOfLength(10)));
    Diagnosis diagnosis2 = new Diagnosis(new Diagnosis.Definition(name, randomAlphaOfLength(30), randomAlphaOfLength(50), randomAlphaOfLength(50), randomAlphaOfLength(30)), List.of(resource2));
    diagnosisList.add(diagnosis2);
    HealthIndicatorResult result = new HealthIndicatorResult(name, status, symptom, details, impacts, diagnosisList);
    XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
    // Render the chunked x-content into a single builder; any IOException fails the test.
    result.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> {
        try {
            xcontent.toXContent(builder, ToXContent.EMPTY_PARAMS);
        } catch (IOException e) {
            logger.error(e.getMessage(), e);
            fail(e.getMessage());
        }
    });
    Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
    assertEquals(status.xContentValue(), xContentMap.get("status"));
    assertEquals(symptom, xContentMap.get("symptom"));
    assertEquals(detailsMap, xContentMap.get("details"));
    List<Map<String, Object>> expectedImpacts = List.of(
        expectedImpactMap(name, impact1Id, impact1Severity, impact1Description, firstImpactArea),
        expectedImpactMap(name, impact2Id, impact2Severity, impact2Description, secondImpactArea)
    );
    assertEquals(expectedImpacts, xContentMap.get("impacts"));
    List<Map<String, Object>> expectedDiagnosis = List.of(
        expectedDiagnosisMap(name, diagnosis1, builder),
        expectedDiagnosisMap(name, diagnosis2, builder)
    );
    assertEquals(expectedDiagnosis, xContentMap.get("diagnosis"));
}

/**
 * Builds the map form of a single expected impact entry, mirroring the x-content produced by
 * {@code HealthIndicatorImpact}.
 */
private static Map<String, Object> expectedImpactMap(String indicatorName, String impactId, int severity, String description, ImpactArea impactArea) {
    Map<String, Object> expected = new HashMap<>();
    expected.put("id", HEALTH_API_ID_PREFIX + indicatorName + ":impact:" + impactId);
    expected.put("severity", severity);
    expected.put("description", description);
    expected.put("impact_areas", List.of(impactArea.displayValue()));
    return expected;
}

/**
 * Builds the map form of a single expected diagnosis entry. Affected resources are rendered
 * through {@code Diagnosis#toXContentChunked} so the expectation stays in sync with the
 * diagnosis' own serialization. The passed builder is used only to obtain the content type.
 */
private Map<String, Object> expectedDiagnosisMap(String indicatorName, Diagnosis diagnosis, XContentBuilder builder) throws IOException {
    Map<String, Object> expected = new HashMap<>();
    expected.put("id", HEALTH_API_ID_PREFIX + indicatorName + ":diagnosis:" + diagnosis.definition().id());
    expected.put("cause", diagnosis.definition().cause());
    expected.put("action", diagnosis.definition().action());
    expected.put("help_url", diagnosis.definition().helpURL());
    if (diagnosis.affectedResources() != null) {
        XContentBuilder diagnosisXContent = XContentFactory.jsonBuilder().prettyPrint();
        diagnosis.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> {
            try {
                xcontent.toXContent(diagnosisXContent, ToXContent.EMPTY_PARAMS);
            } catch (IOException e) {
                logger.error(e.getMessage(), e);
                fail(e.getMessage());
            }
        });
        expected.put(
            "affected_resources",
            XContentHelper.convertToMap(BytesReference.bytes(diagnosisXContent), false, builder.contentType()).v2().get("affected_resources")
        );
    }
    return expected;
}
28404.026243elasticsearch
/**
 * Verifies that {@code SearchOperationListener.CompositeListener} forwards every callback to each
 * wrapped listener exactly once (the counting listener is registered twice, so every counter must
 * advance by two), and that exceptions thrown from listeners during validateReaderContext are
 * rethrown with the remaining listeners' failures attached as suppressed exceptions.
 */
public void testListenersAreExecuted() {
    AtomicInteger preQuery = new AtomicInteger();
    AtomicInteger failedQuery = new AtomicInteger();
    AtomicInteger onQuery = new AtomicInteger();
    AtomicInteger onFetch = new AtomicInteger();
    AtomicInteger preFetch = new AtomicInteger();
    AtomicInteger failedFetch = new AtomicInteger();
    AtomicInteger newContext = new AtomicInteger();
    AtomicInteger freeContext = new AtomicInteger();
    AtomicInteger newScrollContext = new AtomicInteger();
    AtomicInteger freeScrollContext = new AtomicInteger();
    AtomicInteger validateSearchContext = new AtomicInteger();
    AtomicInteger timeInNanos = new AtomicInteger(randomIntBetween(0, 10));
    // Counting listener: each callback sanity-checks its argument and bumps its counter.
    SearchOperationListener listener = new SearchOperationListener() {

        @Override
        public void onPreQueryPhase(SearchContext searchContext) {
            assertNotNull(searchContext);
            preQuery.incrementAndGet();
        }

        @Override
        public void onFailedQueryPhase(SearchContext searchContext) {
            assertNotNull(searchContext);
            failedQuery.incrementAndGet();
        }

        @Override
        public void onQueryPhase(SearchContext searchContext, long tookInNanos) {
            assertEquals(timeInNanos.get(), tookInNanos);
            assertNotNull(searchContext);
            onQuery.incrementAndGet();
        }

        @Override
        public void onPreFetchPhase(SearchContext searchContext) {
            assertNotNull(searchContext);
            preFetch.incrementAndGet();
        }

        @Override
        public void onFailedFetchPhase(SearchContext searchContext) {
            assertNotNull(searchContext);
            failedFetch.incrementAndGet();
        }

        @Override
        public void onFetchPhase(SearchContext searchContext, long tookInNanos) {
            assertEquals(timeInNanos.get(), tookInNanos);
            onFetch.incrementAndGet();
        }

        @Override
        public void onNewReaderContext(ReaderContext readerContext) {
            assertNotNull(readerContext);
            newContext.incrementAndGet();
        }

        @Override
        public void onFreeReaderContext(ReaderContext readerContext) {
            assertNotNull(readerContext);
            freeContext.incrementAndGet();
        }

        @Override
        public void onNewScrollContext(ReaderContext readerContext) {
            assertNotNull(readerContext);
            newScrollContext.incrementAndGet();
        }

        @Override
        public void onFreeScrollContext(ReaderContext readerContext) {
            assertNotNull(readerContext);
            freeScrollContext.incrementAndGet();
        }

        @Override
        public void validateReaderContext(ReaderContext readerContext, TransportRequest request) {
            assertNotNull(readerContext);
            validateSearchContext.incrementAndGet();
        }
    };
    // A proxy listener whose every method (except toString) throws, to exercise exception suppression.
    SearchOperationListener throwingListener = (SearchOperationListener) Proxy.newProxyInstance(SearchOperationListener.class.getClassLoader(), new Class<?>[] { SearchOperationListener.class }, (prox, method, args) -> {
        if (method.getName().equals("toString")) {
            return "test-listener";
        }
        throw new RuntimeException();
    });
    int throwingListeners = 0;
    final List<SearchOperationListener> searchOperationListeners = new ArrayList<>(Arrays.asList(listener, listener));
    if (randomBoolean()) {
        searchOperationListeners.add(throwingListener);
        throwingListeners++;
        if (randomBoolean()) {
            searchOperationListeners.add(throwingListener);
            throwingListeners++;
        }
    }
    Collections.shuffle(searchOperationListeners, random());
    SearchOperationListener.CompositeListener compositeListener = new SearchOperationListener.CompositeListener(searchOperationListeners, logger);
    // All counters in the fixed order expected by assertCounts (see its javadoc).
    AtomicInteger[] counters = {
        preFetch, preQuery, failedFetch, failedQuery, onQuery, onFetch,
        newContext, newScrollContext, freeContext, freeScrollContext, validateSearchContext };
    try (SearchContext ctx = new TestSearchContext((SearchExecutionContext) null)) {
        compositeListener.onQueryPhase(ctx, timeInNanos.get());
        assertCounts(counters, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0);
        compositeListener.onFetchPhase(ctx, timeInNanos.get());
        assertCounts(counters, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0);
        compositeListener.onPreQueryPhase(ctx);
        assertCounts(counters, 0, 2, 0, 0, 2, 2, 0, 0, 0, 0, 0);
        compositeListener.onPreFetchPhase(ctx);
        assertCounts(counters, 2, 2, 0, 0, 2, 2, 0, 0, 0, 0, 0);
        compositeListener.onFailedFetchPhase(ctx);
        assertCounts(counters, 2, 2, 2, 0, 2, 2, 0, 0, 0, 0, 0);
        compositeListener.onFailedQueryPhase(ctx);
        assertCounts(counters, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0);
        compositeListener.onNewReaderContext(mock(ReaderContext.class));
        assertCounts(counters, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0);
        compositeListener.onNewScrollContext(mock(ReaderContext.class));
        assertCounts(counters, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0);
        compositeListener.onFreeReaderContext(mock(ReaderContext.class));
        assertCounts(counters, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0);
        compositeListener.onFreeScrollContext(mock(ReaderContext.class));
        assertCounts(counters, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0);
        if (throwingListeners == 0) {
            compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE);
        } else {
            // The first throwing listener's exception is rethrown; failures from any further
            // throwing listeners are attached as suppressed exceptions.
            RuntimeException expected = expectThrows(RuntimeException.class, () -> compositeListener.validateReaderContext(mock(ReaderContext.class), Empty.INSTANCE));
            assertNull(expected.getMessage());
            assertEquals(throwingListeners - 1, expected.getSuppressed().length);
            if (throwingListeners > 1) {
                assertThat(expected.getSuppressed()[0], not(sameInstance(expected)));
            }
        }
        // The non-throwing listeners must still have been invoked despite any failures.
        assertCounts(counters, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2);
    }
}

/**
 * Asserts each counter's current value. Both arrays follow the fixed order: preFetch, preQuery,
 * failedFetch, failedQuery, onQuery, onFetch, newContext, newScrollContext, freeContext,
 * freeScrollContext, validateSearchContext.
 */
private static void assertCounts(AtomicInteger[] counters, int... expected) {
    assertEquals(counters.length, expected.length);
    for (int i = 0; i < counters.length; i++) {
        assertEquals("counter at index " + i, expected[i], counters[i].get());
    }
}
284346.969116elasticsearch
// Exercises BlendedTermQuery.dismaxBlendedQuery across multiple fields: ranking against a
// cross-field corpus, rewriting over unknown fields/terms, and the doc/term frequencies
// recorded in the rewritten TermQuerys' TermStates.
// NOTE: the assertions on top-ranked doc ids depend on the exact corpus below (including the
// deliberately misspelled "fighers" in doc 1's song title) and on the similarity installed by
// setSimilarity; statement order also fixes the random() call sequence — do not reorder.
public void testDismaxQuery() throws IOException {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
    String[] username = new String[] { "foo fighters", "some cool fan", "cover band" };
    String[] song = new String[] { "generator", "foo fighers - generator", "foo fighters generator" };
    // Randomize norms/frequencies so the blended scores are exercised under several index options.
    final boolean omitNorms = random().nextBoolean();
    final boolean omitFreqs = random().nextBoolean();
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setIndexOptions(omitFreqs ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS);
    ft.setOmitNorms(omitNorms);
    ft.freeze();
    for (int i = 0; i < username.length; i++) {
        Document d = new Document();
        d.add(new TextField("id", Integer.toString(i), Field.Store.YES));
        d.add(new Field("username", username[i], ft));
        d.add(new Field("song", song[i], ft));
        w.addDocument(d);
    }
    // Add many more "foo fighters" usernames to raise the document frequency of those terms,
    // so that the blending across fields actually matters for ranking.
    int iters = scaledRandomIntBetween(25, 100);
    for (int j = 0; j < iters; j++) {
        Document d = new Document();
        d.add(new TextField("id", Integer.toString(username.length + j), Field.Store.YES));
        d.add(new Field("username", "foo fighters", ft));
        d.add(new Field("song", "some bogus text to bump up IDF", ft));
        w.addDocument(d);
    }
    w.commit();
    DirectoryReader reader = DirectoryReader.open(w);
    IndexSearcher searcher = setSimilarity(newSearcher(reader));
    {
        // Blended dis-max over both fields ranks doc 0 ("foo fighters" / "generator") first.
        String[] fields = new String[] { "username", "song" };
        BooleanQuery.Builder query = new BooleanQuery.Builder();
        query.add(BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 0.1f), BooleanClause.Occur.SHOULD);
        query.add(BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "fighters"), 0.1f), BooleanClause.Occur.SHOULD);
        query.add(BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "generator"), 0.1f), BooleanClause.Occur.SHOULD);
        TopDocs search = searcher.search(query.build(), 10);
        ScoreDoc[] scoreDocs = search.scoreDocs;
        assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue());
    }
    {
        // Plain (unblended) DisjunctionMaxQuery, including the misspelled "fighers": doc 1 wins
        // because its song title contains that exact misspelling.
        BooleanQuery.Builder query = new BooleanQuery.Builder();
        DisjunctionMaxQuery uname = new DisjunctionMaxQuery(Arrays.asList(new TermQuery(new Term("username", "foo")), new TermQuery(new Term("song", "foo"))), 0.0f);
        DisjunctionMaxQuery s = new DisjunctionMaxQuery(Arrays.asList(new TermQuery(new Term("username", "fighers")), new TermQuery(new Term("song", "fighers"))), 0.0f);
        DisjunctionMaxQuery gen = new DisjunctionMaxQuery(Arrays.asList(new TermQuery(new Term("username", "generator")), new TermQuery(new Term("song", "generator"))), 0f);
        query.add(uname, BooleanClause.Occur.SHOULD);
        query.add(s, BooleanClause.Occur.SHOULD);
        query.add(gen, BooleanClause.Occur.SHOULD);
        TopDocs search = searcher.search(query.build(), 4);
        ScoreDoc[] scoreDocs = search.scoreDocs;
        assertEquals(Integer.toString(1), reader.document(scoreDocs[0].doc).getField("id").stringValue());
    }
    {
        // An unknown field rewrites to a TermQuery with zero doc/term frequency, while the known
        // fields keep their real statistics; all matching docs are still found.
        String[] fields = new String[] { "username", "song", "unknown_field" };
        Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 1.0f);
        Query rewrite = searcher.rewrite(query);
        assertThat(rewrite, instanceOf(BooleanQuery.class));
        for (BooleanClause clause : (BooleanQuery) rewrite) {
            assertThat(clause.getQuery(), instanceOf(TermQuery.class));
            TermQuery termQuery = (TermQuery) clause.getQuery();
            TermStates termStates = termQuery.getTermStates();
            if (termQuery.getTerm().field().equals("unknown_field")) {
                assertThat(termStates.docFreq(), equalTo(0));
                assertThat(termStates.totalTermFreq(), equalTo(0L));
            } else {
                assertThat(termStates.docFreq(), greaterThan(0));
                assertThat(termStates.totalTermFreq(), greaterThan(0L));
            }
        }
        assertThat(searcher.search(query, 10).totalHits.value, equalTo((long) iters + username.length));
    }
    {
        // A term missing from every field rewrites to all-zero statistics and matches nothing.
        String[] fields = new String[] { "username", "song", "unknown_field" };
        Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "unknown_term"), 1.0f);
        Query rewrite = searcher.rewrite(query);
        assertThat(rewrite, instanceOf(BooleanQuery.class));
        for (BooleanClause clause : (BooleanQuery) rewrite) {
            assertThat(clause.getQuery(), instanceOf(TermQuery.class));
            TermQuery termQuery = (TermQuery) clause.getQuery();
            TermStates termStates = termQuery.getTermStates();
            assertThat(termStates.docFreq(), equalTo(0));
            assertThat(termStates.totalTermFreq(), equalTo(0L));
        }
        assertThat(searcher.search(query, 10).totalHits.value, equalTo(0L));
    }
    {
        // "fan" occurs once, only in the username field; the other fields report zero stats.
        String[] fields = new String[] { "username", "song", "id", "unknown_field" };
        Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "fan"), 1.0f);
        Query rewrite = searcher.rewrite(query);
        assertThat(rewrite, instanceOf(BooleanQuery.class));
        for (BooleanClause clause : (BooleanQuery) rewrite) {
            assertThat(clause.getQuery(), instanceOf(TermQuery.class));
            TermQuery termQuery = (TermQuery) clause.getQuery();
            TermStates termStates = termQuery.getTermStates();
            if (termQuery.getTerm().field().equals("username")) {
                assertThat(termStates.docFreq(), equalTo(1));
                assertThat(termStates.totalTermFreq(), equalTo(1L));
            } else {
                assertThat(termStates.docFreq(), equalTo(0));
                assertThat(termStates.totalTermFreq(), equalTo(0L));
            }
        }
        assertThat(searcher.search(query, 10).totalHits.value, equalTo(1L));
    }
    reader.close();
    w.close();
    dir.close();
}
283763.9716103elasticsearch
/**
 * Builds a random cluster state (indices with random health, open/closed state, and optional
 * stats) and verifies that {@code RestIndicesAction#buildTable} produces one row per index with
 * the expected health, status, name, UUID, and shard/replica counts.
 */
public void testBuildTable() {
    final int numIndices = randomIntBetween(3, 20);
    final Map<String, Settings> indicesSettings = new LinkedHashMap<>();
    final Map<String, IndexStats> indicesStats = new HashMap<>();
    final Metadata.Builder metadata = Metadata.builder();
    final RoutingTable.Builder routingTable = RoutingTable.builder();
    for (int i = 0; i < numIndices; i++) {
        String indexName = "index-" + i;
        Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), randomBoolean()).build();
        indicesSettings.put(indexName, indexSettings);
        IndexMetadata.State indexState = randomBoolean() ? IndexMetadata.State.OPEN : IndexMetadata.State.CLOSE;
        // Some indices have settings but no metadata at all (exercises the "" health cell).
        if (frequently()) {
            ClusterHealthStatus healthStatus = randomFrom(ClusterHealthStatus.values());
            int numberOfShards = randomIntBetween(1, 3);
            int numberOfReplicas = healthStatus == ClusterHealthStatus.YELLOW ? 1 : randomInt(1);
            IndexMetadata indexMetadata = IndexMetadata.builder(indexName).settings(indexSettings).creationDate(System.currentTimeMillis()).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).state(indexState).build();
            metadata.put(indexMetadata, false);
            if (frequently()) {
                Index index = indexMetadata.getIndex();
                IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index);
                // RED leaves the routing table empty; GREEN and YELLOW both start all primaries,
                // differing only in whether replicas are started elsewhere or left unassigned.
                ClusterHealthStatus routingHealth = randomFrom(ClusterHealthStatus.values());
                if (routingHealth != ClusterHealthStatus.RED) {
                    IntStream.range(0, numberOfShards)
                        .mapToObj(n -> new ShardId(index, n))
                        .map(shardId -> TestShardRouting.newShardRouting(shardId, "nodeA", true, ShardRoutingState.STARTED))
                        .forEach(indexRoutingTable::addShard);
                    if (numberOfReplicas > 0) {
                        final String replicaNode = routingHealth == ClusterHealthStatus.GREEN ? "nodeB" : null;
                        final ShardRoutingState replicaState = routingHealth == ClusterHealthStatus.GREEN
                            ? ShardRoutingState.STARTED
                            : ShardRoutingState.UNASSIGNED;
                        IntStream.range(0, numberOfShards)
                            .mapToObj(n -> new ShardId(index, n))
                            .map(shardId -> TestShardRouting.newShardRouting(shardId, replicaNode, false, replicaState))
                            .forEach(indexRoutingTable::addShard);
                    }
                }
                routingTable.add(indexRoutingTable);
                // Stats are present only sometimes (exercises the "red*" health cell below).
                if (frequently()) {
                    IndexStats indexStats = mock(IndexStats.class);
                    when(indexStats.getPrimaries()).thenReturn(new CommonStats());
                    when(indexStats.getTotal()).thenReturn(new CommonStats());
                    indicesStats.put(indexName, indexStats);
                }
            }
        }
    }
    // Randomly drop the whole routing table so indices can also appear without any health.
    final ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).routingTable(randomBoolean() ? routingTable : RoutingTable.builder()).build();
    final RestIndicesAction action = new RestIndicesAction();
    final Table table = action.buildTable(new FakeRestRequest(), indicesSettings, clusterState, indicesStats);
    List<Table.Cell> headers = table.getHeaders();
    assertThat(headers.get(0).value, equalTo("health"));
    assertThat(headers.get(1).value, equalTo("status"));
    assertThat(headers.get(2).value, equalTo("index"));
    assertThat(headers.get(3).value, equalTo("uuid"));
    assertThat(headers.get(4).value, equalTo("pri"));
    assertThat(headers.get(5).value, equalTo("rep"));
    final List<List<Table.Cell>> rows = table.getRows();
    assertThat(rows.size(), equalTo(clusterState.metadata().indices().size()));
    final var clusterStateHealth = new ClusterStateHealth(clusterState);
    for (final List<Table.Cell> row : rows) {
        final String indexName = (String) row.get(2).value;
        ClusterIndexHealth indexHealth = clusterStateHealth.getIndices().get(indexName);
        IndexStats indexStats = indicesStats.get(indexName);
        IndexMetadata indexMetadata = clusterState.metadata().index(indexName);
        // Health cell: real health when routing exists, "red*" when only stats exist, "" otherwise.
        if (indexHealth != null) {
            assertThat(row.get(0).value, equalTo(indexHealth.getStatus().toString().toLowerCase(Locale.ROOT)));
        } else if (indexStats != null) {
            assertThat(row.get(0).value, equalTo("red*"));
        } else {
            assertThat(row.get(0).value, equalTo(""));
        }
        assertThat(row.get(1).value, equalTo(indexMetadata.getState().toString().toLowerCase(Locale.ROOT)));
        assertThat(row.get(2).value, equalTo(indexName));
        assertThat(row.get(3).value, equalTo(indexMetadata.getIndexUUID()));
        assertThat(row.get(4).value, equalTo(indexMetadata.getNumberOfShards()));
        assertThat(row.get(5).value, equalTo(indexMetadata.getNumberOfReplicas()));
    }
}
281655.72179elasticsearch
/**
 * Exercises {@code RemoteClusterService#collectNodes} against two simulated remote clusters:
 * first the happy path where every requested cluster alias resolves its nodes, then a request
 * naming an unknown alias (expects {@link NoSuchRemoteClusterException}), and finally a request
 * made after every remote node has been closed (expects a connection-related failure).
 */
public void testCollectNodes() throws InterruptedException, IOException {
    final Settings settings = Settings.EMPTY;
    // Registries the mock transports use to advertise each cluster's discoverable nodes.
    final List<DiscoveryNode> knownNodes_c1 = new CopyOnWriteArrayList<>();
    final List<DiscoveryNode> knownNodes_c2 = new CopyOnWriteArrayList<>();
    // Two mock "remote" nodes per cluster. NOTE(review): the last sub-test closes these
    // explicitly via IOUtils.close before try-with-resources closes them again — presumably
    // MockTransportService#close is idempotent; confirm.
    try (MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes_c1, VersionInformation.CURRENT, TransportVersion.current(), settings);
        MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes_c1, VersionInformation.CURRENT, TransportVersion.current(), settings);
        MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes_c2, VersionInformation.CURRENT, TransportVersion.current(), settings);
        MockTransportService c2N2 = startTransport("cluster_2_node_2", knownNodes_c2, VersionInformation.CURRENT, TransportVersion.current(), settings)) {
        final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode();
        final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode();
        final DiscoveryNode c2N1Node = c2N1.getLocalDiscoNode();
        final DiscoveryNode c2N2Node = c2N2.getLocalDiscoNode();
        knownNodes_c1.add(c1N1Node);
        knownNodes_c1.add(c1N2Node);
        knownNodes_c2.add(c2N1Node);
        knownNodes_c2.add(c2N2Node);
        // Shuffle so the assertions below cannot depend on discovery ordering.
        Collections.shuffle(knownNodes_c1, random());
        Collections.shuffle(knownNodes_c2, random());
        // Local transport acting as the coordinating ("front") cluster.
        try (MockTransportService transportService = MockTransportService.createNewService(settings, VersionInformation.CURRENT, TransportVersion.current(), threadPool, null)) {
            transportService.start();
            transportService.acceptIncomingRequests();
            // NOTE(review): this builder is constructed but never used — the service below is
            // created with the empty `settings` and both clusters are registered through
            // updateRemoteCluster() instead. Looks like leftover code; confirm before removing.
            final Settings.Builder builder = Settings.builder();
            builder.putList("cluster.remote.cluster_1.seed", c1N1Node.getAddress().toString());
            builder.putList("cluster.remote.cluster_2.seed", c2N1Node.getAddress().toString());
            try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) {
                assertFalse(service.isCrossClusterSearchEnabled());
                service.initializeRemoteClusters();
                assertFalse(service.isCrossClusterSearchEnabled());
                // Register both remote clusters, blocking until each connection attempt finishes.
                final CountDownLatch firstLatch = new CountDownLatch(1);
                service.updateRemoteCluster("cluster_1", createSettings("cluster_1", Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString())), connectionListener(firstLatch));
                firstLatch.await();
                final CountDownLatch secondLatch = new CountDownLatch(1);
                service.updateRemoteCluster("cluster_2", createSettings("cluster_2", Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString())), connectionListener(secondLatch));
                secondLatch.await();
                // Happy path: the returned function resolves (clusterAlias, nodeId) -> node
                // for every node of both registered clusters.
                CountDownLatch latch = new CountDownLatch(1);
                service.collectNodes(new HashSet<>(Arrays.asList("cluster_1", "cluster_2")), new ActionListener<BiFunction<String, String, DiscoveryNode>>() {

                    @Override
                    public void onResponse(BiFunction<String, String, DiscoveryNode> func) {
                        try {
                            assertEquals(c1N1Node, func.apply("cluster_1", c1N1Node.getId()));
                            assertEquals(c1N2Node, func.apply("cluster_1", c1N2Node.getId()));
                            assertEquals(c2N1Node, func.apply("cluster_2", c2N1Node.getId()));
                            assertEquals(c2N2Node, func.apply("cluster_2", c2N2Node.getId()));
                        } finally {
                            // Count down in finally so an assertion failure cannot hang the test.
                            latch.countDown();
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        try {
                            throw new AssertionError(e);
                        } finally {
                            latch.countDown();
                        }
                    }
                });
                latch.await();
                {
                    // Requesting an unregistered alias must fail fast with
                    // NoSuchRemoteClusterException and never invoke onResponse.
                    CountDownLatch failLatch = new CountDownLatch(1);
                    AtomicReference<Exception> ex = new AtomicReference<>();
                    service.collectNodes(new HashSet<>(Arrays.asList("cluster_1", "cluster_2", "no such cluster")), new ActionListener<BiFunction<String, String, DiscoveryNode>>() {

                        @Override
                        public void onResponse(BiFunction<String, String, DiscoveryNode> stringStringDiscoveryNodeBiFunction) {
                            try {
                                fail("should not be called");
                            } finally {
                                failLatch.countDown();
                            }
                        }

                        @Override
                        public void onFailure(Exception e) {
                            try {
                                ex.set(e);
                            } finally {
                                failLatch.countDown();
                            }
                        }
                    });
                    failLatch.await();
                    assertNotNull(ex.get());
                    assertTrue(ex.get() instanceof NoSuchRemoteClusterException);
                    assertEquals("no such remote cluster: [no such cluster]", ex.get().getMessage());
                }
                {
                    // Kill every remote node, then collect again: the call must fail, though the
                    // exact exception depends on how the disconnect is observed (see below).
                    logger.info("closing all source nodes");
                    IOUtils.close(c1N1, c1N2, c2N1, c2N2);
                    logger.info("all source nodes are closed");
                    CountDownLatch failLatch = new CountDownLatch(1);
                    AtomicReference<Exception> ex = new AtomicReference<>();
                    service.collectNodes(new HashSet<>(Arrays.asList("cluster_1", "cluster_2")), new ActionListener<BiFunction<String, String, DiscoveryNode>>() {

                        @Override
                        public void onResponse(BiFunction<String, String, DiscoveryNode> stringStringDiscoveryNodeBiFunction) {
                            try {
                                fail("should not be called");
                            } finally {
                                failLatch.countDown();
                            }
                        }

                        @Override
                        public void onFailure(Exception e) {
                            try {
                                ex.set(e);
                            } finally {
                                failLatch.countDown();
                            }
                        }
                    });
                    failLatch.await();
                    assertNotNull(ex.get());
                    // Several failure modes are legitimate here depending on timing: either a
                    // reconnect was attempted and exhausted (IllegalStateException with a
                    // per-cluster message), or the failure surfaces as a transport-level error.
                    if (ex.get() instanceof IllegalStateException) {
                        assertThat(ex.get().getMessage(), either(equalTo("Unable to open any connections to remote cluster [cluster_1]")).or(equalTo("Unable to open any connections to remote cluster [cluster_2]")));
                    } else {
                        assertThat(ex.get(), either(instanceOf(TransportException.class)).or(instanceOf(NoSuchRemoteClusterException.class)).or(instanceOf(NoSeedNodeLeftException.class)));
                    }
                }
            }
        }
    }
}
282740.441147elasticsearch
/**
 * Verifies that {@code AutoFollowCoordinator#updateAutoFollowers} starts one {@code AutoFollower}
 * per remote cluster that has at least one active auto-follow pattern, and marks a follower as
 * removed once every pattern for its remote cluster is deactivated or gone. The previously
 * duplicated 16-argument pattern copies and cluster-state builders are factored into the
 * {@link #deactivatedCopy} and {@link #clusterStateWithPatterns} helpers below.
 */
public void testUpdateAutoFollowersNoActivePatterns() {
    final ClusterService clusterService = mockClusterService();
    final AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator(Settings.EMPTY, null, clusterService, new CcrLicenseChecker(() -> true, () -> false), () -> 1L, () -> 1L, Runnable::run);
    // No auto-follow metadata at all: no followers should be running.
    autoFollowCoordinator.updateAutoFollowers(ClusterState.EMPTY_STATE);
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(0));
    // Three active patterns over two remote clusters -> exactly one follower per remote cluster.
    Map<String, AutoFollowPattern> patterns = new HashMap<>();
    patterns.put("pattern1", createAutoFollowPattern("remote1", "logs-*"));
    patterns.put("pattern2", createAutoFollowPattern("remote2", "logs-*"));
    patterns.put("pattern3", createAutoFollowPattern("remote2", "metrics-*"));
    autoFollowCoordinator.updateAutoFollowers(clusterStateWithPatterns(patterns));
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(2));
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().get("remote1"), notNullValue());
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().get("remote2"), notNullValue());
    AutoFollowCoordinator.AutoFollower removedAutoFollower1 = autoFollowCoordinator.getAutoFollowers().get("remote1");
    assertThat(removedAutoFollower1.removed, is(false));
    AutoFollowCoordinator.AutoFollower removedAutoFollower2 = autoFollowCoordinator.getAutoFollowers().get("remote2");
    assertThat(removedAutoFollower2.removed, is(false));
    // Deactivate remote1's only pattern and one of remote2's two: remote1's follower must be
    // removed while remote2's keeps running on its remaining active pattern.
    patterns.computeIfPresent("pattern1", (name, pattern) -> deactivatedCopy(pattern));
    patterns.computeIfPresent("pattern3", (name, pattern) -> deactivatedCopy(pattern));
    autoFollowCoordinator.updateAutoFollowers(clusterStateWithPatterns(patterns));
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(1));
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().get("remote2"), notNullValue());
    assertThat(removedAutoFollower1.removed, is(true));
    assertThat(removedAutoFollower2.removed, is(false));
    // Reactivate remote1 through a brand-new pattern and deactivate remote2's last active one:
    // remote1 gets a *fresh* follower instance, remote2's follower is removed.
    patterns.put("pattern4", createAutoFollowPattern("remote1", "metrics-*"));
    patterns.computeIfPresent("pattern2", (name, pattern) -> deactivatedCopy(pattern));
    autoFollowCoordinator.updateAutoFollowers(clusterStateWithPatterns(patterns));
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(1));
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().get("remote1"), notNullValue());
    AutoFollowCoordinator.AutoFollower removedAutoFollower4 = autoFollowCoordinator.getAutoFollowers().get("remote1");
    assertThat(removedAutoFollower4.removed, is(false));
    assertNotSame(removedAutoFollower4, removedAutoFollower1);
    assertThat(removedAutoFollower2.removed, is(true));
    // Dropping every pattern removes all remaining followers.
    autoFollowCoordinator.updateAutoFollowers(clusterStateWithPatterns(Collections.emptyMap()));
    assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(0));
    assertThat(removedAutoFollower1.removed, is(true));
    assertThat(removedAutoFollower2.removed, is(true));
    assertThat(removedAutoFollower4.removed, is(true));
}

/** Builds a cluster state named "remote" whose metadata carries the given auto-follow patterns. */
private static ClusterState clusterStateWithPatterns(Map<String, AutoFollowPattern> patterns) {
    return ClusterState.builder(new ClusterName("remote")).metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap()))).build();
}

/**
 * Returns a copy of {@code pattern} with the boolean flag set to {@code false} — deactivating it,
 * as demonstrated by the follower removals asserted above — and empty settings; all throttling
 * limits and timeouts are carried over unchanged.
 */
private static AutoFollowPattern deactivatedCopy(AutoFollowPattern pattern) {
    return new AutoFollowPattern(pattern.getRemoteCluster(), pattern.getLeaderIndexPatterns(), pattern.getLeaderIndexExclusionPatterns(), pattern.getFollowIndexPattern(), Settings.EMPTY, false, pattern.getMaxReadRequestOperationCount(), pattern.getMaxWriteRequestOperationCount(), pattern.getMaxOutstandingReadRequests(), pattern.getMaxOutstandingWriteRequests(), pattern.getMaxReadRequestSize(), pattern.getMaxWriteRequestSize(), pattern.getMaxWriteBufferCount(), pattern.getMaxWriteBufferSize(), pattern.getMaxRetryDelay(), pattern.getReadPollTimeout());
}
28824.8215180elasticsearch
/**
 * Builds a {@link ShardFollowNodeTask} for unit testing with every remote interaction stubbed
 * out: each {@code inner*} hook below answers from the test's queues (read/write/mapping/
 * settings/aliases failures and versions, checkpoints, response sizes) instead of doing any
 * networking, so individual tests script the follower's behavior by pre-loading those queues.
 */
private ShardFollowNodeTask createShardFollowTask(ShardFollowTaskParams params) {
    // Flipped by markAsCompleted() so isStopped() can end the task without the real
    // task-manager completion machinery.
    AtomicBoolean stopped = new AtomicBoolean(false);
    ShardFollowTask followTask = new ShardFollowTask(params.remoteCluster, params.followShardId, params.leaderShardId, params.maxReadRequestOperationCount, params.maxWriteRequestOperationCount, params.maxOutstandingReadRequests, params.maxOutstandingWriteRequests, params.maxReadRequestSize, params.maxWriteRequestSize, params.maxWriteBufferCount, params.maxWriteBufferSize, params.maxRetryDelay, params.readPollTimeout, params.headers);
    // Reset all scripting/recording queues so each created task starts from a clean slate.
    shardChangesRequests = new ArrayList<>();
    bulkShardOperationRequests = new ArrayList<>();
    readFailures = new LinkedList<>();
    writeFailures = new LinkedList<>();
    mappingUpdateFailures = new LinkedList<>();
    mappingVersions = new LinkedList<>();
    settingsUpdateFailures = new LinkedList<>();
    settingsVersions = new LinkedList<>();
    aliasesUpdateFailures = new LinkedList<>();
    aliasesVersions = new LinkedList<>();
    leaderGlobalCheckpoints = new LinkedList<>();
    followerGlobalCheckpoints = new LinkedList<>();
    maxSeqNos = new LinkedList<>();
    responseSizes = new LinkedList<>();
    pendingBulkShardRequests = new LinkedList<>();
    return new ShardFollowNodeTask(1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), followTask, scheduler, System::nanoTime) {

        // Scripted failure wins; otherwise hand back the next queued mapping version.
        // If neither queue has an entry, the callback is simply never invoked.
        @Override
        protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer handler, Consumer<Exception> errorHandler) {
            Exception failure = mappingUpdateFailures.poll();
            if (failure != null) {
                errorHandler.accept(failure);
                return;
            }
            final Long mappingVersion = mappingVersions.poll();
            if (mappingVersion != null) {
                handler.accept(mappingVersion);
            }
        }

        // Same queue-driven protocol as innerUpdateMapping, for settings versions.
        @Override
        protected void innerUpdateSettings(LongConsumer handler, Consumer<Exception> errorHandler) {
            Exception failure = settingsUpdateFailures.poll();
            if (failure != null) {
                errorHandler.accept(failure);
                return;
            }
            final Long settingsVersion = settingsVersions.poll();
            if (settingsVersion != null) {
                handler.accept(settingsVersion);
            }
        }

        // Same queue-driven protocol as innerUpdateMapping, for aliases versions.
        @Override
        protected void innerUpdateAliases(final LongConsumer handler, final Consumer<Exception> errorHandler) {
            final Exception failure = aliasesUpdateFailures.poll();
            if (failure != null) {
                errorHandler.accept(failure);
                return;
            }
            final Long aliasesVersion = aliasesVersions.poll();
            if (aliasesVersion != null) {
                handler.accept(aliasesVersion);
            }
        }

        // Records the would-be bulk write, then either fails it (scripted), answers it with the
        // next queued follower checkpoint (used for both global checkpoint and max seq no), or —
        // when no checkpoint is queued — parks the listener in pendingBulkShardRequests so the
        // test can complete it later.
        @Override
        protected void innerSendBulkShardOperationsRequest(String followerHistoryUUID, final List<Translog.Operation> operations, final long maxSeqNoOfUpdates, final Consumer<BulkShardOperationsResponse> handler, final Consumer<Exception> errorHandler) {
            bulkShardOperationRequests.add(operations);
            Exception writeFailure = ShardFollowNodeTaskTests.this.writeFailures.poll();
            if (writeFailure != null) {
                errorHandler.accept(writeFailure);
                return;
            }
            Long followerGlobalCheckpoint = followerGlobalCheckpoints.poll();
            if (followerGlobalCheckpoint != null) {
                final BulkShardOperationsResponse response = new BulkShardOperationsResponse();
                response.setGlobalCheckpoint(followerGlobalCheckpoint);
                response.setMaxSeqNo(followerGlobalCheckpoint);
                handler.accept(response);
            } else {
                pendingBulkShardRequests.add(ActionListener.wrap(handler::accept, errorHandler));
            }
        }

        // Records the (from, batchSize) request; fails it if a read failure is scripted, and only
        // synthesizes a response (responseSizes no-ops at seq-nos from..from+size-1) when the test
        // enabled simulateResponse — otherwise the request is left unanswered.
        @Override
        protected void innerSendShardChangesRequest(long from, int requestBatchSize, Consumer<ShardChangesAction.Response> handler, Consumer<Exception> errorHandler) {
            beforeSendShardChangesRequest.accept(getStatus());
            shardChangesRequests.add(new long[] { from, requestBatchSize });
            Exception readFailure = ShardFollowNodeTaskTests.this.readFailures.poll();
            if (readFailure != null) {
                errorHandler.accept(readFailure);
            } else if (simulateResponse.get()) {
                // Empty responseSizes queue means an empty batch rather than a null poll.
                final int responseSize = responseSizes.size() == 0 ? 0 : responseSizes.poll();
                final Translog.Operation[] operations = new Translog.Operation[responseSize];
                for (int i = 0; i < responseSize; i++) {
                    operations[i] = new Translog.NoOp(from + i, 0, "test");
                }
                final ShardChangesAction.Response response = new ShardChangesAction.Response(mappingVersions.poll(), 0L, 0L, leaderGlobalCheckpoints.poll(), maxSeqNos.poll(), randomNonNegativeLong(), operations, 1L);
                handler.accept(response);
            }
        }

        // When enabled, drives retentionLeaseRenewal from a real scheduler every 200ms;
        // otherwise returns a no-op cancellable that reports itself as already cancelled.
        @Override
        protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) {
            if (scheduleRetentionLeaseRenewal.get()) {
                final ScheduledThreadPoolExecutor testScheduler = Scheduler.initScheduler(Settings.EMPTY, "test-scheduler");
                final ScheduledFuture<?> future = testScheduler.scheduleWithFixedDelay(() -> retentionLeaseRenewal.accept(followerGlobalCheckpoint.getAsLong()), 0, TimeValue.timeValueMillis(200).millis(), TimeUnit.MILLISECONDS);
                return new Scheduler.Cancellable() {

                    @Override
                    public boolean cancel() {
                        // Shut the private scheduler down with the task so it cannot leak.
                        final boolean cancel = future.cancel(true);
                        testScheduler.shutdown();
                        return cancel;
                    }

                    @Override
                    public boolean isCancelled() {
                        return future.isCancelled();
                    }
                };
            } else {
                return new Scheduler.Cancellable() {

                    @Override
                    public boolean cancel() {
                        return true;
                    }

                    @Override
                    public boolean isCancelled() {
                        return true;
                    }
                };
            }
        }

        // Stopped either by the production logic or by the test-local markAsCompleted() below.
        @Override
        protected boolean isStopped() {
            return super.isStopped() || stopped.get();
        }

        // Bypass the real completion path; tests observe completion via isStopped().
        @Override
        public void markAsCompleted() {
            stopped.set(true);
        }
    };
}
282605.431142elasticsearch
/**
 * Verifies {@code LifecyclePolicyUtils.calculateUsage} over progressively richer cluster
 * states: an empty state, a registered-but-unused policy, an index managed by the policy,
 * a composable template referencing it, and finally a data stream whose backing index is
 * managed by the policy (plus an index on an unrelated policy that must not be counted).
 */
public void testCalculateUsage() {
    final IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY), EmptySystemIndices.INSTANCE);
    final ItemUsage noUsage = new ItemUsage(Collections.emptyList(), Collections.emptyList(), Collections.emptyList());
    {
        // Nothing in the cluster references the policy at all.
        final ClusterState state = ClusterState.builder(new ClusterName("mycluster")).build();
        assertThat(LifecyclePolicyUtils.calculateUsage(resolver, state, "mypolicy"), equalTo(noUsage));
    }
    {
        // The policy is registered but no index, data stream, or template uses it.
        final IndexLifecycleMetadata lifecycle = new IndexLifecycleMetadata(Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING);
        final ClusterState state = ClusterState.builder(new ClusterName("mycluster")).metadata(Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, lifecycle).build()).build();
        assertThat(LifecyclePolicyUtils.calculateUsage(resolver, state, "mypolicy"), equalTo(noUsage));
    }
    {
        // A single index managed by the policy is reported.
        final IndexLifecycleMetadata lifecycle = new IndexLifecycleMetadata(Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING);
        final IndexMetadata.Builder managedIndex = IndexMetadata.builder("myindex").settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy"));
        final ClusterState state = ClusterState.builder(new ClusterName("mycluster")).metadata(Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, lifecycle).put(managedIndex).build()).build();
        assertThat(LifecyclePolicyUtils.calculateUsage(resolver, state, "mypolicy"), equalTo(new ItemUsage(Collections.singleton("myindex"), Collections.emptyList(), Collections.emptyList())));
    }
    {
        // A composable template referencing the policy is also reported.
        final IndexLifecycleMetadata lifecycle = new IndexLifecycleMetadata(Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING);
        final IndexMetadata.Builder managedIndex = IndexMetadata.builder("myindex").settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy"));
        final ComposableIndexTemplate template = ComposableIndexTemplate.builder().indexPatterns(Collections.singletonList("myds")).template(new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), null, null)).dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)).build();
        final ClusterState state = ClusterState.builder(new ClusterName("mycluster")).metadata(Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, lifecycle).put(managedIndex).putCustom(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(Collections.singletonMap("mytemplate", template))).build()).build();
        assertThat(LifecyclePolicyUtils.calculateUsage(resolver, state, "mypolicy"), equalTo(new ItemUsage(Collections.singleton("myindex"), Collections.emptyList(), Collections.singleton("mytemplate"))));
    }
    {
        // Full picture: two managed indices, an index on another policy (excluded), the
        // template, and a data stream backed by the managed "myindex".
        final IndexLifecycleMetadata lifecycle = new IndexLifecycleMetadata(Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), OperationMode.RUNNING);
        final ComposableIndexTemplate template = ComposableIndexTemplate.builder().indexPatterns(Collections.singletonList("myds")).template(new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy").build(), null, null)).dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)).build();
        final Metadata.Builder metadata = Metadata.builder()
            .putCustom(IndexLifecycleMetadata.TYPE, lifecycle)
            .put(IndexMetadata.builder("myindex").settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy")))
            .put(IndexMetadata.builder("another").settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy")))
            .put(IndexMetadata.builder("other").settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "otherpolicy")))
            .putCustom(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(Collections.singletonMap("mytemplate", template)));
        metadata.put(DataStreamTestHelper.newInstance("myds", Collections.singletonList(metadata.get("myindex").getIndex())));
        final ClusterState state = ClusterState.builder(new ClusterName("mycluster")).metadata(metadata.build()).build();
        assertThat(LifecyclePolicyUtils.calculateUsage(resolver, state, "mypolicy"), equalTo(new ItemUsage(Arrays.asList("myindex", "another"), Collections.singleton("myds"), Collections.singleton("mytemplate"))));
    }
}
283354.971132elasticsearch
/**
 * Builds the parameterized cases for the LEFT string function: for KEYWORD input, every
 * case was a copy of the same supplier differing only in how the text, the length, and the
 * expected prefix are produced — that duplication is factored into {@link #leftCase}. The
 * evaluator string, types, and random-call order are unchanged from the original cases.
 */
public static Iterable<Object[]> parameters() {
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // Any prefix of "" — positive, zero, or negative length — is "".
    suppliers.add(leftCase("empty string", () -> "", text -> between(-64, 64), (text, length) -> ""));
    // In-range length returns the (unicode-aware) prefix of that many characters.
    suppliers.add(leftCase("ascii", () -> randomAlphaOfLengthBetween(1, 64), text -> between(1, text.length()), (text, length) -> unicodeLeftSubstring(text, length)));
    // A length >= the string length returns the whole string.
    suppliers.add(leftCase("ascii longer than string", () -> randomAlphaOfLengthBetween(1, 64), text -> between(text.length(), 128), (text, length) -> text));
    // Zero and negative lengths return the empty string.
    suppliers.add(leftCase("ascii zero length", () -> randomAlphaOfLengthBetween(1, 64), text -> 0, (text, length) -> ""));
    suppliers.add(leftCase("ascii negative length", () -> randomAlphaOfLengthBetween(1, 64), text -> between(-128, -1), (text, length) -> ""));
    // Same four behaviors over arbitrary unicode input.
    suppliers.add(leftCase("unicode", () -> randomUnicodeOfLengthBetween(1, 64), text -> between(1, text.length()), (text, length) -> unicodeLeftSubstring(text, length)));
    suppliers.add(leftCase("unicode longer than string", () -> randomUnicodeOfLengthBetween(1, 64), text -> between(text.length(), 128), (text, length) -> text));
    suppliers.add(leftCase("unicode zero length", () -> randomUnicodeOfLengthBetween(1, 64), text -> 0, (text, length) -> ""));
    suppliers.add(leftCase("unicode negative length", () -> randomUnicodeOfLengthBetween(1, 64), text -> between(-128, -1), (text, length) -> ""));
    // TEXT input is accepted too and still yields a KEYWORD result; kept inline because the
    // helper is fixed to KEYWORD input.
    suppliers.add(new TestCaseSupplier("ascii as text input", List.of(DataTypes.TEXT, DataTypes.INTEGER), () -> {
        String text = randomAlphaOfLengthBetween(1, 64);
        int length = between(1, text.length());
        return new TestCaseSupplier.TestCase(List.of(new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.TEXT, "str"), new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")), "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", DataTypes.KEYWORD, equalTo(new BytesRef(unicodeLeftSubstring(text, length))));
    }));
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
}

/**
 * Builds one KEYWORD-input LEFT test case.
 *
 * @param name     case name shown by the test framework
 * @param textGen  produces the random input string (called first, preserving random order)
 * @param lengthGen derives the length argument from the generated text (called second)
 * @param expected maps (text, length) to the expected result string
 */
private static TestCaseSupplier leftCase(
    String name,
    java.util.function.Supplier<String> textGen,
    java.util.function.Function<String, Integer> lengthGen,
    java.util.function.BiFunction<String, Integer, String> expected
) {
    return new TestCaseSupplier(name, List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> {
        String text = textGen.get();
        int length = lengthGen.apply(text);
        return new TestCaseSupplier.TestCase(
            List.of(
                new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
                new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
            ),
            "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
            DataTypes.KEYWORD,
            equalTo(new BytesRef(expected.apply(text, length)))
        );
    });
}
283354.971132elasticsearch
/**
 * Test parameters for the {@code RIGHT(str, length)} string function.
 * <p>
 * Each supplier produces an input string plus a length and expects the evaluator to
 * return the trailing {@code length} characters (unicode-aware, computed with
 * {@code unicodeRightSubstring}). Zero and negative lengths yield the empty string;
 * lengths at or beyond the input's length yield the whole input. The repeated
 * {@code TestCase} construction is factored into {@link #rightCase}.
 *
 * @return the parameter sets consumed by the parameterized test runner
 */
public static Iterable<Object[]> parameters() {
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // Empty input: any length (negative, zero or positive) produces "".
    suppliers.add(new TestCaseSupplier("empty string", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> rightCase("", between(-64, 64), "", false)));
    // Plain ASCII with a length inside the string.
    suppliers.add(new TestCaseSupplier("ascii", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> {
        String text = randomAlphaOfLengthBetween(1, 64);
        int length = between(1, text.length());
        return rightCase(text, length, unicodeRightSubstring(text, length), false);
    }));
    // A length at or beyond the input's length returns the whole string.
    suppliers.add(new TestCaseSupplier("ascii longer than string", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> {
        String text = randomAlphaOfLengthBetween(1, 64);
        return rightCase(text, between(text.length(), 128), text, false);
    }));
    // Zero length always returns "".
    suppliers.add(new TestCaseSupplier("ascii zero length", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> rightCase(randomAlphaOfLengthBetween(1, 64), 0, "", false)));
    // Negative lengths also return "".
    suppliers.add(new TestCaseSupplier("ascii negative length", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> rightCase(randomAlphaOfLengthBetween(1, 64), between(-128, -1), "", false)));
    // Same four scenarios over arbitrary unicode input (multi-byte code points).
    suppliers.add(new TestCaseSupplier("unicode", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> {
        String text = randomUnicodeOfLengthBetween(1, 64);
        int length = between(1, text.length());
        return rightCase(text, length, unicodeRightSubstring(text, length), false);
    }));
    suppliers.add(new TestCaseSupplier("unicode longer than string", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> {
        String text = randomUnicodeOfLengthBetween(1, 64);
        return rightCase(text, between(text.length(), 128), text, false);
    }));
    suppliers.add(new TestCaseSupplier("unicode zero length", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> rightCase(randomUnicodeOfLengthBetween(1, 64), 0, "", false)));
    suppliers.add(new TestCaseSupplier("unicode negative length", List.of(DataTypes.KEYWORD, DataTypes.INTEGER), () -> rightCase(randomUnicodeOfLengthBetween(1, 64), between(-128, -1), "", false)));
    // The string argument typed as TEXT instead of KEYWORD; result type stays KEYWORD.
    suppliers.add(new TestCaseSupplier("ascii as text", List.of(DataTypes.TEXT, DataTypes.INTEGER), () -> {
        String text = randomAlphaOfLengthBetween(1, 64);
        int length = between(1, text.length());
        return rightCase(text, length, unicodeRightSubstring(text, length), true);
    }));
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
}

/**
 * Builds a single {@code RIGHT} test case.
 *
 * @param text     the input string
 * @param length   the requested number of trailing characters
 * @param expected the expected result of {@code RIGHT(text, length)}
 * @param asText   when {@code true} the string argument is typed TEXT instead of KEYWORD
 */
private static TestCaseSupplier.TestCase rightCase(String text, int length, String expected, boolean asText) {
    return new TestCaseSupplier.TestCase(
        List.of(
            new TestCaseSupplier.TypedData(new BytesRef(text), asText ? DataTypes.TEXT : DataTypes.KEYWORD, "str"),
            new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
        ),
        "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
        DataTypes.KEYWORD,
        equalTo(new BytesRef(expected))
    );
}
282041.581160elasticsearch
/**
 * Test parameters for the {@code !=} (not equals) binary comparison.
 * <p>
 * Covers numeric combinations with type widening, unsigned longs, booleans, IPs,
 * versions, dates, all string type pairings, and spatial types. Each entry pins
 * both the expected evaluator name and the expected boolean result
 * ({@code false == l.equals(r)}, i.e. true when the operands differ).
 */
public static Iterable<Object[]> parameters() {
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // int/long/double operand pairs with automatic widening. The bounds are roughly
    // half of each type's range — presumably to keep widened values representable
    // without overflow; TODO confirm against forBinaryComparisonWithWidening's contract.
    suppliers.addAll(TestCaseSupplier.forBinaryComparisonWithWidening(new TestCaseSupplier.NumericTypeTestConfigs<>(new TestCaseSupplier.NumericTypeTestConfig<>((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1, (l, r) -> l.intValue() != r.intValue(), "NotEqualsIntsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1, (l, r) -> l.longValue() != r.longValue(), "NotEqualsLongsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, (l, r) -> l.doubleValue() != r.doubleValue(), "NotEqualsDoublesEvaluator")), "lhs", "rhs", (lhs, rhs) -> List.of(), false));
    // unsigned_long values (as BigInteger cases) compared via the longs evaluator without casting.
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsLongsEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE), true), TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE), true), List.of(), true));
    // Booleans have a dedicated evaluator.
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsBoolsEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.booleanCases(), TestCaseSupplier.booleanCases(), List.of(), false));
    // IPs and versions both compare through the keywords evaluator.
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsKeywordsEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.ipCases(), TestCaseSupplier.ipCases(), List.of(), false));
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsKeywordsEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.versionCases(""), TestCaseSupplier.versionCases(""), List.of(), false));
    // Dates compare via the longs evaluator (presumably as epoch millis; TODO confirm in dateCases()).
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsLongsEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), List.of(), false));
    // Every combination of string types (keyword/text) shares the keywords evaluator.
    suppliers.addAll(TestCaseSupplier.stringCases((l, r) -> false == l.equals(r), (lhsType, rhsType) -> "NotEqualsKeywordsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", List.of(), DataTypes.BOOLEAN));
    // Spatial types — geo/cartesian points and shapes — all use the geometries evaluator.
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsGeometriesEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.geoPointCases(), TestCaseSupplier.geoPointCases(), List.of(), false));
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsGeometriesEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.geoShapeCases(), TestCaseSupplier.geoShapeCases(), List.of(), false));
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsGeometriesEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.cartesianPointCases(), TestCaseSupplier.cartesianPointCases(), List.of(), false));
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("NotEqualsGeometriesEvaluator", "lhs", "rhs", (l, r) -> false == l.equals(r), DataTypes.BOOLEAN, TestCaseSupplier.cartesianShapeCases(), TestCaseSupplier.cartesianShapeCases(), List.of(), false));
    // Wrap with null-propagation cases and type-error cases for unsupported operand pairs.
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), AbstractFunctionTestCase::errorMessageStringForBinaryOperators));
}
281806.094488elasticsearch
/**
 * Emits the next token, or returns {@code false} at end of input.
 * <p>
 * A candidate token accumulates letters/digits plus a restricted set of interior
 * punctuation ({@code _ . - @ :}) and slashes. Candidates are rejected (and counted
 * as skipped positions) when they contain only hex-looking characters, start with a
 * digit, or consist entirely of slashes — so numbers and hex strings do not become
 * tokens. Trailing punctuation is trimmed from an accepted token.
 * <p>
 * NOTE(review): relies on fields of the enclosing tokenizer ({@code nextOffset},
 * {@code putBackChar}, {@code skippedPositions}, attribute instances); assumes
 * {@code getNextChar()} re-delivers {@code putBackChar} first — confirm in the
 * enclosing class.
 */
public final boolean incrementToken() throws IOException {
    clearAttributes();
    skippedPositions = 0;
    // Per-candidate state; reset whenever the current candidate is discarded.
    int start = -1;
    int length = 0;
    // True once the candidate holds a char that is not a hex digit and not . - @ :
    boolean haveNonHex = false;
    // Count of leading letters seen before any colon; a ':' is only accepted while
    // the whole candidate so far is letters (lettersBeforeColon == length).
    int lettersBeforeColon = 0;
    boolean haveColon = false;
    int firstBackslashPos = -1;
    int firstForwardSlashPos = -1;
    int slashCount = 0;
    int curChar;
    while ((curChar = getNextChar()) >= 0) {
        ++nextOffset;
        // Accept the char into the candidate when it is:
        //  - a letter or digit;
        //  - interior _ . - @ (only once the candidate is non-empty), or ':' while
        //    everything before it is letters (e.g. a scheme or drive prefix);
        //  - '/' anywhere;
        //  - '\\' when starting a token, after a single-letter colon prefix
        //    (looks like "C:\" — TODO confirm intent), or when the token began with '\\'.
        if (Character.isLetterOrDigit(curChar) || (length > 0 && (curChar == '_' || curChar == '.' || curChar == '-' || curChar == '@' || (curChar == ':' && lettersBeforeColon == length))) || curChar == '/' || (curChar == '\\' && (length == 0 || (haveColon && lettersBeforeColon == 1) || firstBackslashPos == 0))) {
            if (length == 0) {
                // Record the token's start offset (nextOffset was already advanced).
                start = nextOffset - 1;
            }
            termAtt.append((char) curChar);
            ++length;
            if (curChar == ':') {
                haveColon = true;
            } else if (curChar == '/') {
                ++slashCount;
                if (firstForwardSlashPos == -1) {
                    firstForwardSlashPos = length - 1;
                }
            } else if (curChar == '\\') {
                ++slashCount;
                if (firstBackslashPos == -1) {
                    firstBackslashPos = length - 1;
                }
            } else {
                if (haveColon) {
                    // A colon sits at index lettersBeforeColon. If it was not followed
                    // immediately by a slash, the colon does not introduce a path/URL:
                    // drop the colon and the current char, push the char back, and end
                    // the token at the pre-colon text.
                    if (firstBackslashPos != lettersBeforeColon + 1 && firstForwardSlashPos != lettersBeforeColon + 1) {
                        assert length - lettersBeforeColon == 2;
                        length -= 2;
                        putBackChar = curChar;
                        --nextOffset;
                        break;
                    }
                } else if (Character.isLetter(curChar)) {
                    ++lettersBeforeColon;
                }
            }
            // Track whether the candidate contains anything that could not be part of
            // a hex number (. - @ : are tolerated inside hex-looking strings).
            haveNonHex = haveNonHex || (Character.digit(curChar, 16) == -1 && curChar != '.' && curChar != '-' && curChar != '@' && curChar != ':');
        } else if (length > 0) {
            // Delimiter hit with a pending candidate: emit it if it qualifies,
            // otherwise discard it and count a skipped position.
            if (haveNonHex && Character.isDigit(termAtt.charAt(0)) == false && length > slashCount) {
                break;
            }
            ++skippedPositions;
            start = -1;
            length = 0;
            termAtt.setEmpty();
            haveNonHex = false;
            lettersBeforeColon = 0;
            haveColon = false;
            firstBackslashPos = -1;
            firstForwardSlashPos = -1;
            slashCount = 0;
        }
    }
    if (length == 0) {
        // End of stream with nothing pending.
        return false;
    }
    // End-of-stream candidate that fails the emit criteria (hex-looking, starts with
    // a digit, or all slashes) is skipped rather than emitted.
    if (haveNonHex == false || Character.isDigit(termAtt.charAt(0)) || length == slashCount) {
        ++skippedPositions;
        return false;
    }
    // Trim trailing punctuation; safe because the first char is a letter/digit or a
    // slash (and all-slash tokens were rejected above), so the loop must terminate.
    char toCheck;
    while ((toCheck = termAtt.charAt(length - 1)) == '_' || toCheck == '.' || toCheck == '-' || toCheck == '@' || toCheck == ':') {
        --length;
    }
    termAtt.setLength(length);
    offsetAtt.setOffset(correctOffset(start), correctOffset(start + length));
    posIncrAtt.setPositionIncrement(skippedPositions + 1);
    return true;
}
285330.21119elasticsearch
/**
 * Exercises {@code DatafeedJob.runRealtime()}: the flush request it issues, the
 * extraction window it uses, and the delayed-data annotation lifecycle — first
 * created, then updated in place, and left alone when nothing changes.
 * <p>
 * NOTE(review): {@code currentTime}, {@code flushJobResponse} and the mocks are
 * fields of the enclosing test class; this method depends on their setup.
 */
public void testRealtimeRun() throws Exception {
    // Flush acknowledges and advances to 2000ms; the detector reports one bucket
    // (timestamp 2000ms, epoch 2s, span 4s) with 10 missing documents.
    flushJobResponse = new FlushJobAction.Response(true, Instant.ofEpochMilli(2000));
    Bucket bucket = mock(Bucket.class);
    when(bucket.getTimestamp()).thenReturn(new Date(2000));
    when(bucket.getEpoch()).thenReturn(2L);
    when(bucket.getBucketSpan()).thenReturn(4L);
    when(flushJobFuture.actionGet()).thenReturn(flushJobResponse);
    when(client.execute(same(FlushJobAction.INSTANCE), flushJobRequests.capture())).thenReturn(flushJobFuture);
    when(delayedDataDetector.detectMissingData(2000)).thenReturn(Collections.singletonList(BucketWithMissingData.fromMissingAndBucket(10, bucket)));
    currentTime = DELAYED_DATA_FREQ_HALF;
    long frequencyMs = 100;
    long queryDelayMs = 1000;
    DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1, false, DELAYED_DATA_FREQ);
    long next = datafeedJob.runRealtime();
    // Next run is scheduled one frequency plus the (100ms) buffer after now
    // — NOTE(review): the extra 100 presumably is the next-task delay; confirm.
    assertEquals(currentTime + frequencyMs + 100, next);
    // Extraction resumes just after the last end time (1000) up to now - queryDelay.
    verify(dataExtractorFactory).newExtractor(1000L + 1L, currentTime - queryDelayMs);
    // The flush must calc interim results and advance time, without normalization
    // or a refresh.
    FlushJobAction.Request flushRequest = new FlushJobAction.Request(jobId);
    flushRequest.setCalcInterim(true);
    flushRequest.setAdvanceTime("59000");
    flushRequest.setWaitForNormalization(false);
    flushRequest.setRefreshRequired(false);
    verify(client).execute(same(FlushJobAction.INSTANCE), eq(flushRequest));
    verify(client, never()).execute(same(PersistJobAction.INSTANCE), any());
    // Second run just under the half-frequency threshold for delayed-data checks.
    currentTime = currentTime + DELAYED_DATA_FREQ_HALF - 1;
    byte[] contentBytes = "content".getBytes(StandardCharsets.UTF_8);
    InputStream inputStream = new ByteArrayInputStream(contentBytes);
    when(dataExtractor.hasNext()).thenReturn(true).thenReturn(false);
    when(dataExtractor.next()).thenReturn(new DataExtractor.Result(new SearchInterval(1000L, 2000L), Optional.of(inputStream)));
    when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor);
    datafeedJob.runRealtime();
    // Third run crosses the threshold: the missing-data annotation is created.
    currentTime = currentTime + DELAYED_DATA_FREQ_HALF;
    inputStream = new ByteArrayInputStream(contentBytes);
    when(dataExtractor.hasNext()).thenReturn(true).thenReturn(false);
    when(dataExtractor.next()).thenReturn(new DataExtractor.Result(new SearchInterval(1000L, 2000L), Optional.of(inputStream)));
    when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor);
    datafeedJob.runRealtime();
    String msg = Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_MISSING_DATA, 10, XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(2000)));
    long annotationCreateTime = currentTime;
    {
        // The created annotation spans the missing bucket and is indexed without an
        // id (a new document) into the annotations write alias.
        Annotation expectedAnnotation = new Annotation.Builder().setAnnotation(msg).setCreateTime(new Date(annotationCreateTime)).setCreateUsername(InternalUsers.XPACK_USER.principal()).setTimestamp(bucket.getTimestamp()).setEndTimestamp(new Date((bucket.getEpoch() + bucket.getBucketSpan()) * 1000)).setJobId(jobId).setModifiedTime(new Date(annotationCreateTime)).setModifiedUsername(InternalUsers.XPACK_USER.principal()).setType(Annotation.Type.ANNOTATION).setEvent(Annotation.Event.DELAYED_DATA).build();
        BytesReference expectedSource = BytesReference.bytes(expectedAnnotation.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS));
        ArgumentCaptor<BulkRequest> bulkRequestArgumentCaptor = ArgumentCaptor.forClass(BulkRequest.class);
        verify(client, atMost(2)).execute(eq(TransportBulkAction.TYPE), bulkRequestArgumentCaptor.capture(), any());
        BulkRequest bulkRequest = bulkRequestArgumentCaptor.getValue();
        assertThat(bulkRequest.requests(), hasSize(1));
        IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(0);
        assertThat(indexRequest.index(), equalTo(AnnotationIndex.WRITE_ALIAS_NAME));
        assertThat(indexRequest.id(), nullValue());
        assertThat(indexRequest.source().utf8ToString(), equalTo(expectedSource.utf8ToString()));
        assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.INDEX));
    }
    // More missing data appears (a second bucket, 5 more missing docs). After the
    // delayed-data window elapses the existing annotation should be updated, not
    // duplicated.
    Bucket bucket2 = mock(Bucket.class);
    when(bucket2.getTimestamp()).thenReturn(new Date(6000));
    when(bucket2.getEpoch()).thenReturn(6L);
    when(bucket2.getBucketSpan()).thenReturn(4L);
    when(delayedDataDetector.detectMissingData(2000)).thenReturn(Arrays.asList(BucketWithMissingData.fromMissingAndBucket(10, bucket), BucketWithMissingData.fromMissingAndBucket(5, bucket2)));
    currentTime = currentTime + DELAYED_DATA_WINDOW + 1;
    inputStream = new ByteArrayInputStream(contentBytes);
    when(dataExtractor.hasNext()).thenReturn(true).thenReturn(false);
    when(dataExtractor.next()).thenReturn(new DataExtractor.Result(new SearchInterval(1000L, 2000L), Optional.of(inputStream)));
    when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor);
    datafeedJob.runRealtime();
    msg = Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_MISSING_DATA, 15, XContentElasticsearchExtension.DEFAULT_FORMATTER.format(Instant.ofEpochMilli(6000)));
    long annotationUpdateTime = currentTime;
    {
        // The update keeps the original create time/user but extends the end
        // timestamp to bucket2 and is indexed with the existing annotation id.
        Annotation expectedUpdatedAnnotation = new Annotation.Builder().setAnnotation(msg).setCreateTime(new Date(annotationCreateTime)).setCreateUsername(InternalUsers.XPACK_USER.principal()).setTimestamp(bucket.getTimestamp()).setEndTimestamp(new Date((bucket2.getEpoch() + bucket2.getBucketSpan()) * 1000)).setJobId(jobId).setModifiedTime(new Date(annotationUpdateTime)).setModifiedUsername(InternalUsers.XPACK_USER.principal()).setType(Annotation.Type.ANNOTATION).setEvent(Annotation.Event.DELAYED_DATA).build();
        BytesReference expectedSource = BytesReference.bytes(expectedUpdatedAnnotation.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS));
        ArgumentCaptor<BulkRequest> bulkRequestArgumentCaptor = ArgumentCaptor.forClass(BulkRequest.class);
        verify(client, atMost(2)).execute(eq(TransportBulkAction.TYPE), bulkRequestArgumentCaptor.capture(), any());
        BulkRequest bulkRequest = bulkRequestArgumentCaptor.getValue();
        assertThat(bulkRequest.requests(), hasSize(1));
        IndexRequest indexRequest = (IndexRequest) bulkRequest.requests().get(0);
        assertThat(indexRequest.index(), equalTo(AnnotationIndex.WRITE_ALIAS_NAME));
        assertThat(indexRequest.id(), equalTo(annotationDocId));
        assertThat(indexRequest.source(), equalTo(expectedSource));
        assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.INDEX));
    }
    // A further run with no new missing data must not write any more annotations:
    // still at most the two index calls verified above.
    currentTime = currentTime + DELAYED_DATA_WINDOW + 1;
    inputStream = new ByteArrayInputStream(contentBytes);
    when(dataExtractor.hasNext()).thenReturn(true).thenReturn(false);
    when(dataExtractor.next()).thenReturn(new DataExtractor.Result(new SearchInterval(1000L, 2000L), Optional.of(inputStream)));
    when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor);
    datafeedJob.runRealtime();
    verify(client, atMost(2)).index(any());
}
283167.961144elasticsearch
/**
 * Verifies {@code detectReasonToRebalanceModels} across node-shutdown transitions:
 * a rebalance ("nodes changed") is reported only when the effective set of usable
 * ML nodes changes, not for intermediate shutdown bookkeeping.
 */
public void testDetectReasonToRebalanceModels_WithNodeShutdowns() {
    String clusterName = "testDetectReasonToRebalanceModels_WithNodeShutdowns";
    String model1 = "model-1";
    DiscoveryNode mlNode1 = buildNode("ml-node-1", true, ByteSizeValue.ofGb(4).getBytes(), 8);
    DiscoveryNode mlNode2 = buildNode("ml-node-2", true, ByteSizeValue.ofGb(4).getBytes(), 8);
    DiscoveryNode esNode1 = buildNode("es-node-1", false, ByteSizeValue.ofGb(4).getBytes(), 8);
    DiscoveryNode esNode2 = buildNode("es-node-2", false, ByteSizeValue.ofGb(4).getBytes(), 8);
    DiscoveryNode esNode3 = buildNode("es-node-3", false, ByteSizeValue.ofGb(4).getBytes(), 8);
    // model-1 fully allocated across both ML nodes.
    TrainedModelAssignmentMetadata fullModelAllocation = TrainedModelAssignmentMetadata.Builder.empty().addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100)).addRoutingEntry(mlNode1.getId(), new RoutingInfo(1, 1, RoutingState.STARTED, "")).addRoutingEntry(mlNode2.getId(), new RoutingInfo(1, 1, RoutingState.STARTED, ""))).build();
    ClusterState fullyAllocated = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).build()).build();
    // ML node flagged for shutdown while still present -> rebalance.
    var previousState = fullyAllocated;
    var currentState = ClusterState.builder(fullyAllocated).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.of("nodes changed"));
    // The shutting-down ML node actually leaves -> no additional rebalance.
    previousState = currentState;
    currentState = csBuilderWithNodes(clusterName, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.empty());
    // The ML node rejoins while still marked as shutting down -> no rebalance.
    previousState = currentState;
    currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.empty());
    // The shutdown marker is cleared with all nodes present -> rebalance.
    previousState = currentState;
    currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.of("nodes changed"));
    // A non-ML node flagged for shutdown -> irrelevant to model assignment.
    previousState = currentState;
    currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.empty());
    // The non-ML node leaves -> still no rebalance.
    previousState = currentState;
    currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.empty());
    // The non-ML node rejoins, still flagged -> no rebalance.
    previousState = currentState;
    currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.empty());
    // Identical state transition repeated -> still no rebalance.
    previousState = currentState;
    currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.empty());
    // From fully allocated, the ML node leaves and is flagged in one step -> rebalance.
    previousState = fullyAllocated;
    currentState = csBuilderWithNodes(clusterName, mlNode2, esNode1, esNode2, esNode3).metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)).build()).build();
    assertRebalanceReason(previousState, currentState, Optional.of("nodes changed"));
    // The ML node returns with no shutdown marker -> rebalance.
    previousState = currentState;
    currentState = fullyAllocated;
    assertRebalanceReason(previousState, currentState, Optional.of("nodes changed"));
}

/**
 * Asserts the rebalance reason detected for the cluster-state transition
 * {@code previousState -> currentState}.
 */
private static void assertRebalanceReason(ClusterState previousState, ClusterState currentState, Optional<String> expectedReason) {
    assertThat(
        TrainedModelAssignmentClusterService.detectReasonToRebalanceModels(new ClusterChangedEvent("test", currentState, previousState)),
        equalTo(expectedReason)
    );
}
283064.029130elasticsearch
/**
 * End-to-end check of the local monitoring exporter: enables it, waits for the
 * expected monitoring documents to appear, then stops monitoring and verifies
 * that collection actually ceases.
 */
public void testExport() throws Exception {
    try {
        // Optionally seed a regular index so there is cluster content to monitor.
        if (randomBoolean()) {
            IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
            for (int i = 0; i < indexRequestBuilders.length; i++) {
                indexRequestBuilders[i] = prepareIndex("test").setId(Integer.toString(i)).setSource("title", "This is a random document");
            }
            indexRandom(true, indexRequestBuilders);
        }
        // Enable the local exporter (cluster-alert management disabled) and,
        // if configured, a custom time format for monitoring index names.
        final Settings.Builder exporterSettings = Settings.builder().put(MonitoringService.ENABLED.getKey(), true).put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE).put("xpack.monitoring.exporters._local.enabled", true).put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", false);
        if (indexTimeFormat != null) {
            exporterSettings.put("xpack.monitoring.exporters._local.index.name.time_format", indexTimeFormat);
        }
        updateClusterSettings(exporterSettings);
        // Optionally push monitoring bulk docs directly and wait until they are
        // searchable, then check templates and document shape.
        if (randomBoolean()) {
            final int nbDocs = randomIntBetween(1, 20);
            List<MonitoringBulkDoc> monitoringDocs = new ArrayList<>(nbDocs);
            for (int i = 0; i < nbDocs; i++) {
                monitoringDocs.add(createMonitoringBulkDoc());
            }
            assertBusy(() -> {
                MonitoringBulkRequestBuilder bulk = new MonitoringBulkRequestBuilder(client());
                monitoringDocs.forEach(bulk::add);
                assertEquals(RestStatus.OK, bulk.get().status());
                refresh();
                assertThat(indexExists(".monitoring-*"), is(true));
                ensureYellowAndNoInitializingShards(".monitoring-*");
                assertResponse(prepareSearch(".monitoring-*"), response -> assertThat((long) nbDocs, lessThanOrEqualTo(response.getHits().getTotalHits().value)));
            });
            checkMonitoringTemplates();
            checkMonitoringDocs();
        }
        // Wait (up to 30s) for each collector document type to appear, and for a
        // node_stats bucket per node in the cluster.
        final int numNodes = internalCluster().getNodeNames().length;
        assertBusy(() -> {
            assertThat(indexExists(".monitoring-*"), is(true));
            ensureYellowAndNoInitializingShards(".monitoring-*");
            assertThat(SearchResponseUtils.getTotalHitsValue(prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "cluster_stats"))), greaterThan(0L));
            assertThat(SearchResponseUtils.getTotalHitsValue(prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "index_recovery"))), greaterThan(0L));
            assertThat(SearchResponseUtils.getTotalHitsValue(prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "index_stats"))), greaterThan(0L));
            assertThat(SearchResponseUtils.getTotalHitsValue(prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "indices_stats"))), greaterThan(0L));
            assertThat(SearchResponseUtils.getTotalHitsValue(prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "shards"))), greaterThan(0L));
            assertResponse(prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "node_stats")).addAggregation(terms("agg_nodes_ids").field("node_stats.node_id")), response -> {
                Terms aggregation = response.getAggregations().get("agg_nodes_ids");
                assertEquals("Aggregation on node_id must return a bucket per node involved in test", numNodes, aggregation.getBuckets().size());
                for (String nodeName : internalCluster().getNodeNames()) {
                    String nodeId = getNodeId(nodeName);
                    Terms.Bucket bucket = aggregation.getBucketByKey(nodeId);
                    assertTrue("No bucket found for node id [" + nodeId + "]", bucket != null);
                    assertTrue(bucket.getDocCount() >= 1L);
                }
            });
        }, 30L, TimeUnit.SECONDS);
        checkMonitoringTemplates();
        checkMonitoringDocs();
    } finally {
        // Always disable monitoring, even if the assertions above failed.
        stopMonitoring();
    }
    // After stopping, verify collection has actually ceased: the most recent
    // node_stats timestamp per node must be at least elapsedInSeconds old — or,
    // if no monitoring indices exist, simply wait out that interval.
    final int elapsedInSeconds = 10;
    final ZonedDateTime startTime = ZonedDateTime.now(ZoneOffset.UTC);
    assertBusy(() -> {
        if (indexExists(".monitoring-*")) {
            ensureYellowAndNoInitializingShards(".monitoring-*");
            refresh(".monitoring-es-*");
            assertResponse(prepareSearch(".monitoring-es-*").setSize(0).setQuery(QueryBuilders.termQuery("type", "node_stats")).addAggregation(terms("agg_nodes_ids").field("node_stats.node_id").subAggregation(max("agg_last_time_collected").field("timestamp"))), response -> {
                Terms aggregation = response.getAggregations().get("agg_nodes_ids");
                for (String nodeName : internalCluster().getNodeNames()) {
                    String nodeId = getNodeId(nodeName);
                    Terms.Bucket bucket = aggregation.getBucketByKey(nodeId);
                    assertTrue("No bucket found for node id [" + nodeId + "]", bucket != null);
                    assertTrue(bucket.getDocCount() >= 1L);
                    Max subAggregation = bucket.getAggregations().get("agg_last_time_collected");
                    ZonedDateTime lastCollection = Instant.ofEpochMilli(Math.round(subAggregation.value())).atZone(ZoneOffset.UTC);
                    assertTrue(lastCollection.plusSeconds(elapsedInSeconds).isBefore(ZonedDateTime.now(ZoneOffset.UTC)));
                }
            });
        } else {
            assertTrue(ZonedDateTime.now(ZoneOffset.UTC).isAfter(startTime.plusSeconds(elapsedInSeconds)));
        }
    }, 30L, TimeUnit.SECONDS);
}
283758.821133elasticsearch
/**
 * Verifies that a search whose shards can all be skipped by the can-match phase still
 * produces a valid (empty) response, and that skipping keeps working even after the node
 * holding a searchable-snapshot index has been stopped and its shards are unassigned.
 *
 * Setup: a regular index routed to one data node and a searchable-snapshot mount of the
 * same data routed to a second data node (whose recovery is initially blocked). All
 * indexed documents carry timestamps OUTSIDE the range queried below.
 */
public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() throws Exception {
    internalCluster().startMasterOnlyNode();
    internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
    final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode();
    final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode();
    final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot);
    // Regular index pinned (via routing allocation) to its dedicated data node; every
    // document's timestamp falls outside the range searched later in this test.
    final String indexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final int indexOutsideSearchRangeShardCount = randomIntBetween(1, 3);
    createIndexWithTimestamp(indexOutsideSearchRange, indexOutsideSearchRangeShardCount, Settings.builder().put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex).build());
    indexDocumentsWithTimestampWithinDate(indexOutsideSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_OUTSIDE_RANGE);
    // Snapshot that index and mount it as a searchable snapshot on the other data node.
    final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createRepository(repositoryName, "mock");
    final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexOutsideSearchRange)).snapshotId();
    final String searchableSnapshotIndexOutsideSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    // Block repository reads on the target node so the mounted index cannot recover yet.
    blockNodeOnAnyFiles(repositoryName, dataNodeHoldingSearchableSnapshot);
    Settings restoredIndexSettings = Settings.builder().put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot).build();
    final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest(searchableSnapshotIndexOutsideSearchRange, repositoryName, snapshotId.getName(), indexOutsideSearchRange, restoredIndexSettings, Strings.EMPTY_ARRAY, false, randomFrom(MountSearchableSnapshotRequest.Storage.values()));
    client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet();
    final int searchableSnapshotShardCount = indexOutsideSearchRangeShardCount;
    final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange);
    // While recovery is blocked no shard has started, so no timestamp range or field type
    // is available yet for the mounted index.
    assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS));
    DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex());
    assertThat(timestampFieldType, nullValue());
    // Range query matching no documents (all docs were indexed outside this window).
    RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME).from("2020-11-28T00:00:00.000000000Z", true).to("2020-11-29T00:00:00.000000000Z");
    SearchRequest request = new SearchRequest().indices(indexOutsideSearchRange, searchableSnapshotIndexOutsideSearchRange).source(new SearchSourceBuilder().query(rangeQuery));
    final int totalShards = indexOutsideSearchRangeShardCount + searchableSnapshotShardCount;
    {
        // With the snapshot shards unrecovered: regular-index shards succeed, the same
        // number of shards fail (searchableSnapshotShardCount == indexOutsideSearchRangeShardCount),
        // and the snapshot shards count as skipped. Zero hits either way.
        assertResponse(client().search(request), searchResponse -> {
            assertThat(searchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount));
            assertThat(searchResponse.getFailedShards(), equalTo(indexOutsideSearchRangeShardCount));
            assertThat(searchResponse.getSkippedShards(), equalTo(searchableSnapshotShardCount));
            assertThat(searchResponse.getTotalShards(), equalTo(totalShards));
            assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L));
        });
    }
    {
        // The search_shards API should report the snapshot shards as skippable and the
        // regular-index shards as not skippable.
        boolean allowPartialSearchResults = true;
        SearchShardsRequest searchShardsRequest = new SearchShardsRequest(new String[] { indexOutsideSearchRange, searchableSnapshotIndexOutsideSearchRange }, SearchRequest.DEFAULT_INDICES_OPTIONS, rangeQuery, null, null, allowPartialSearchResults, null);
        SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
        assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards));
        // teeing splits the groups into (skipped, not-skipped) partitions in one pass.
        List<List<SearchShardsGroup>> partitionedBySkipped = searchShardsResponse.getGroups().stream().collect(Collectors.teeing(Collectors.filtering(g -> g.skipped(), Collectors.toList()), Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), List::of));
        List<SearchShardsGroup> skipped = partitionedBySkipped.get(0);
        List<SearchShardsGroup> notSkipped = partitionedBySkipped.get(1);
        assertThat(skipped.size(), equalTo(searchableSnapshotShardCount));
        assertThat(notSkipped.size(), equalTo(indexOutsideSearchRangeShardCount));
    }
    // Unblock the repository and let the mounted index recover fully.
    unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot);
    waitUntilRecoveryIsDone(searchableSnapshotIndexOutsideSearchRange);
    ensureGreen(searchableSnapshotIndexOutsideSearchRange);
    final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexOutsideSearchRange);
    final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange();
    final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex());
    assertThat(dateFieldType, notNullValue());
    final DateFieldMapper.Resolution resolution = dateFieldType.resolution();
    // After recovery the cluster knows the mounted index's full timestamp range, which lies
    // entirely before the searched window (2020-11-26/27 vs. 2020-11-28/29).
    assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true));
    assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY)));
    assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-26T00:00:00Z"))));
    assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-27T00:00:00Z"))));
    // Now stop the node holding the snapshot shards; its shards become unassigned, but the
    // recorded timestamp range should still let the coordinator skip them.
    internalCluster().stopNode(dataNodeHoldingSearchableSnapshot);
    waitUntilAllShardsAreUnassigned(updatedIndexMetadata.getIndex());
    assertBusy(() -> {
        // All shards succeed; all but one are skipped (one shard must still run the query
        // phase to produce the empty response — NOTE(review): presumed reason, confirm).
        assertResponse(client().search(request), newSearchResponse -> {
            assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards));
            assertThat(newSearchResponse.getFailedShards(), equalTo(0));
            assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1));
            assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards));
            assertThat(newSearchResponse.getHits().getTotalHits().value, equalTo(0L));
        });
    });
    {
        // search_shards now reports every shard group as skippable.
        boolean allowPartialSearchResults = true;
        SearchShardsRequest searchShardsRequest = new SearchShardsRequest(new String[] { indexOutsideSearchRange, searchableSnapshotIndexOutsideSearchRange }, SearchRequest.DEFAULT_INDICES_OPTIONS, rangeQuery, null, null, allowPartialSearchResults, null);
        SearchShardsResponse searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
        assertThat(searchShardsResponse.getGroups().size(), equalTo(totalShards));
        List<List<SearchShardsGroup>> partitionedBySkipped = searchShardsResponse.getGroups().stream().collect(Collectors.teeing(Collectors.filtering(g -> g.skipped(), Collectors.toList()), Collectors.filtering(g -> g.skipped() == false, Collectors.toList()), List::of));
        List<SearchShardsGroup> skipped = partitionedBySkipped.get(0);
        List<SearchShardsGroup> notSkipped = partitionedBySkipped.get(1);
        assertThat(skipped.size(), equalTo(totalShards));
        assertThat(notSkipped.size(), equalTo(0));
    }
}
285030.551127elasticsearch
/**
 * Builds the {@link TransportSamlLogoutAction} under test on top of a fully mocked stack:
 * a mocked {@link Client} whose request builders are real but whose transport calls are
 * stubbed, a mocked security index that runs callbacks inline, a permissive license state,
 * and a real {@link SamlRealm} loaded from the {@code idp1.xml} metadata resource.
 *
 * Every index/bulk write issued through the mocked client is recorded in
 * {@code indexRequests}/{@code bulkRequests} so individual tests can inspect it.
 */
public void setup() throws Exception {
    final RealmIdentifier realmIdentifier = new RealmIdentifier("saml", REALM_NAME);
    final Path metadata = PathUtils.get(SamlRealm.class.getResource("idp1.xml").toURI());
    // Realm + token-service settings; token service must be enabled for logout to work.
    final Settings settings = Settings.builder().put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).put("path.home", createTempDir()).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_METADATA_PATH), metadata.toString()).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.IDP_ENTITY_ID), SamlRealmTests.TEST_IDP_ENTITY_ID).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.SP_ENTITY_ID), SP_URL).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.SP_ACS), SP_URL).put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "uid").put(getFullSettingKey(realmIdentifier, RealmSettings.ORDER_SETTING), 0).build();
    this.threadPool = new TestThreadPool("saml logout test thread pool", settings);
    final ThreadContext threadContext = this.threadPool.getThreadContext();
    // Pre-authenticate the thread context as the "kibana" user issuing the logout.
    AuthenticationTestHelper.builder().user(new User("kibana")).realmRef(new Authentication.RealmRef("realm", "type", "node")).build(false).writeToContext(threadContext);
    indexRequests = new ArrayList<>();
    bulkRequests = new ArrayList<>();
    client = mock(Client.class);
    when(client.threadPool()).thenReturn(threadPool);
    when(client.settings()).thenReturn(settings);
    // prepare* stubs return REAL builders wired to the mocked client, so request
    // construction behaves normally while execution stays stubbed below.
    doAnswer(invocationOnMock -> {
        GetRequestBuilder builder = new GetRequestBuilder(client);
        builder.setIndex((String) invocationOnMock.getArguments()[0]).setId((String) invocationOnMock.getArguments()[1]);
        return builder;
    }).when(client).prepareGet(nullable(String.class), nullable(String.class));
    doAnswer(invocationOnMock -> {
        IndexRequestBuilder builder = new IndexRequestBuilder(client);
        builder.setIndex((String) invocationOnMock.getArguments()[0]);
        return builder;
    }).when(client).prepareIndex(nullable(String.class));
    doAnswer(invocationOnMock -> {
        UpdateRequestBuilder builder = new UpdateRequestBuilder(client);
        builder.setIndex((String) invocationOnMock.getArguments()[0]).setId((String) invocationOnMock.getArguments()[1]);
        return builder;
    }).when(client).prepareUpdate(nullable(String.class), nullable(String.class));
    doAnswer(invocationOnMock -> {
        BulkRequestBuilder builder = new BulkRequestBuilder(client);
        return builder;
    }).when(client).prepareBulk();
    when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client));
    // multiGet: answer with two item responses whose documents do not exist, i.e. neither
    // looked-up token document is present yet.
    doAnswer(invocationOnMock -> {
        ActionListener<MultiGetResponse> listener = (ActionListener<MultiGetResponse>) invocationOnMock.getArguments()[1];
        MultiGetResponse response = mock(MultiGetResponse.class);
        MultiGetItemResponse[] responses = new MultiGetItemResponse[2];
        when(response.getResponses()).thenReturn(responses);
        GetResponse oldGetResponse = mock(GetResponse.class);
        when(oldGetResponse.isExists()).thenReturn(false);
        responses[0] = new MultiGetItemResponse(oldGetResponse, null);
        GetResponse getResponse = mock(GetResponse.class);
        responses[1] = new MultiGetItemResponse(getResponse, null);
        when(getResponse.isExists()).thenReturn(false);
        listener.onResponse(response);
        return Void.TYPE;
    }).when(client).multiGet(any(MultiGetRequest.class), any(ActionListener.class));
    // index/execute(TransportIndexAction)/bulk: capture the request for later inspection
    // and immediately respond with a successful synthetic response. Note the argument
    // indices differ per signature (execute's request sits at index 1, listener at 2).
    doAnswer(invocationOnMock -> {
        IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[0];
        ActionListener<IndexResponse> listener = (ActionListener<IndexResponse>) invocationOnMock.getArguments()[1];
        indexRequests.add(indexRequest);
        final IndexResponse response = new IndexResponse(new ShardId("test", "test", 0), indexRequest.id(), 1, 1, 1, true);
        listener.onResponse(response);
        return Void.TYPE;
    }).when(client).index(any(IndexRequest.class), any(ActionListener.class));
    doAnswer(invocationOnMock -> {
        IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[1];
        ActionListener<IndexResponse> listener = (ActionListener<IndexResponse>) invocationOnMock.getArguments()[2];
        indexRequests.add(indexRequest);
        final IndexResponse response = new IndexResponse(new ShardId("test", "test", 0), indexRequest.id(), 1, 1, 1, true);
        listener.onResponse(response);
        return Void.TYPE;
    }).when(client).execute(eq(TransportIndexAction.TYPE), any(IndexRequest.class), any(ActionListener.class));
    doAnswer(invocationOnMock -> {
        BulkRequest bulkRequest = (BulkRequest) invocationOnMock.getArguments()[0];
        ActionListener<BulkResponse> listener = (ActionListener<BulkResponse>) invocationOnMock.getArguments()[1];
        bulkRequests.add(bulkRequest);
        final BulkResponse response = new BulkResponse(new BulkItemResponse[0], 1);
        listener.onResponse(response);
        return Void.TYPE;
    }).when(client).bulk(any(BulkRequest.class), any(ActionListener.class));
    // Security index mock: run the supplied andThen callback synchronously and report the
    // index as fully available.
    final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class);
    doAnswer(inv -> {
        ((Runnable) inv.getArguments()[1]).run();
        return null;
    }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class));
    doAnswer(inv -> {
        ((Runnable) inv.getArguments()[1]).run();
        return null;
    }).when(securityIndex).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class));
    when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true);
    when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true);
    when(securityIndex.defensiveCopy()).thenReturn(securityIndex);
    final MockLicenseState licenseState = mock(MockLicenseState.class);
    when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true);
    final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    final SecurityContext securityContext = new SecurityContext(settings, threadContext);
    // Same securityIndex mock serves as both the main and tokens security index.
    tokenService = new TokenService(settings, Clock.systemUTC(), client, licenseState, securityContext, securityIndex, securityIndex, clusterService);
    final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
    final Realms realms = mock(Realms.class);
    action = new TransportSamlLogoutAction(transportService, mock(ActionFilters.class), realms, tokenService);
    // Build a real SAML realm from the test metadata and register it with the mocked
    // Realms lookup so the action can resolve it by name.
    final Environment env = TestEnvironment.newEnvironment(settings);
    final RealmConfig realmConfig = new RealmConfig(realmIdentifier, settings, env, threadContext);
    samlRealm = SamlRealm.create(realmConfig, mock(SSLService.class), mock(ResourceWatcherService.class), mock(UserRoleMapper.class));
    when(realms.realm(realmConfig.name())).thenReturn(samlRealm);
}
282535.441158elasticsearch
/**
 * Builds SAML SP metadata with one signing certificate ({@code threeCertificates[0]}) and
 * two encryption certificates ({@code threeCertificates[1..2]}), marshals it to XML, and
 * compares the result against a complete expected document in which the three Base64
 * certificate bodies are substituted via {@link NamedFormatter} placeholders.
 */
public void testBuildFullMetadataWithSigningAndTwoEncryptionCerts() throws Exception {
    final EntityDescriptor descriptor = new SamlSpMetadataBuilder(Locale.US, "https://kibana.apps.hydra/").serviceName("Hydra Kibana").nameIdFormat(NameID.PERSISTENT).withAttribute("uid", "urn:oid:0.9.2342.19200300.100.1.1").withAttribute("mail", "urn:oid:0.9.2342.19200300.100.1.3").withAttribute("groups", "urn:oid:1.3.6.1.4.1.5923.1.5.1.1").withAttribute(null, "urn:oid:2.16.840.1.113730.3.1.241").withAttribute(null, "urn:oid:1.3.6.1.4.1.5923.1.1.1.6").assertionConsumerServiceUrl("https://kibana.apps.hydra/saml/acs").singleLogoutServiceUrl("https://kibana.apps.hydra/saml/logout").authnRequestsSigned(true).signingCertificate(threeCertificates[0]).encryptionCertificates(Arrays.asList(threeCertificates[1], threeCertificates[2])).organization("Hydra", "Hydra", "https://hail.hydra/").withContact("administrative", "Wolfgang", "von Strucker", "baron.strucker@supreme.hydra").withContact("technical", "Paul", "Ebersol", "pne@tech.hydra").build();
    final Element element = new EntityDescriptorMarshaller().marshall(descriptor);
    final String xml = SamlUtils.toString(element);
    // Expected Base64 body of the signing certificate (first KeyDescriptor in the template).
    final String expectedCertificateOne = joinCertificateLines("MIIDWDCCAkCgAwIBAgIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA0GCSqGSIb3DQEBCwUAMB0xGzAZ", "BgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDAeFw0xNzExMjkwMjQ3MjZaFw0yMDExMjgwMjQ3MjZa", "MB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC", "AQoCggEBALHTuPGOieCbD2mZUdYrdH4ofo7qFze6rQUROCLKqf69uBuwvraNWOcwxHUTKVlLMV3d", "dKzYo+yfC44AMXrrV+79xVWsTCNHu9sxQzcDwiEx2OtOOX9MAk6tJQ3svNrMPNXWh8ftwmmY9XdF", "ZwMYUdo6FPjSQj5uQTDmGWRgF08f7VRlk6N92d/fzn9DlDm+TFuaOr17OTSR4B6RTrNwKC29AmXQ", "TwCijCObjLqyMEqP20dZCQeVf2qw8JKUHhW4r6mCLzqmeR+kRTqiHMSWxJddzxDGw6X7fOS7iuzB", "0+TnsKwgu8nYrEXds9MkGf1Yco7WsM43g+Es+LhNHP+es70CAwEAAaOBjjCBizAdBgNVHQ4EFgQU", "ILqVKGhIi8p5Xffsow/IKFLhRbIwWQYDVR0jBFIwUIAUILqVKGhIi8p5Xffsow/IKFLhRbKhIaQf", "MB0xGzAZBgNVBAMTEkVsYXN0aWNzZWFyY2gtU0FNTIIVANRTZaFrK+Pz19O8TZsb3HSJmAWpMA8G", "A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGhl4V9mp4SWSV2E3HAJ1PX+Vmp6k27K", "d0tkOk1B9fyA13QB30teyiL7RR0vSHRyWFY8rQH1mHD366GKRWLITRG/QPULamGdYXX4h0pFj5ld", "aubLxM/O9vEAxOgmo/lsdkeIq9tLBqY06r/5A/Mcgo63KGi00AFYBoyvqfOu6nRLPnQr+rKVfdNO", "pWeIiFY1i2XTNZ3CZjNPSTwiQMUzrCxKXB9lL0vF6QL2Gj2iBhzNfXi88wf7xaR6XKY1wNuv3HLP", "sL7n+PWby7LRX188dyS1dmKfQcrKL65OssBA5NC8CAYyBiygBmWN+5kVJM5fSb0SwPSoVWrNyz+8", "IUldQE8=");
    // Expected Base64 body of the first encryption certificate.
    final String expectedCertificateTwo = joinCertificateLines("MIID0zCCArugAwIBAgIJALi5bDfjMszLMA0GCSqGSIb3DQEBCwUAMEgxDDAKBgNVBAoTA29yZzEW", "MBQGA1UECxMNZWxhc3RpY3NlYXJjaDEgMB4GA1UEAxMXRWxhc3RpY3NlYXJjaCBUZXN0IE5vZGUw", "HhcNMTUwOTIzMTg1MjU3WhcNMTkwOTIyMTg1MjU3WjBIMQwwCgYDVQQKEwNvcmcxFjAUBgNVBAsT", "DWVsYXN0aWNzZWFyY2gxIDAeBgNVBAMTF0VsYXN0aWNzZWFyY2ggVGVzdCBOb2RlMIIBIjANBgkq", "hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3rGZ1QbsW0+MuyrSLmMfDFKtLBkIFW8V0gRuurFg1PUK", "KNR1Mq2tMVwjjYETAU/UY0iKZOzjgvYPKhDTYBTte/WHR1ZK4CYVv7TQX/gtFQG/ge/c7u0sLch9", "p7fbd+/HZiLS/rBEZDIohvgUvzvnA8+OIYnw4kuxKo/5iboAIS41klMg/lATm8V71LMY68inht71", "/ZkQoAHKgcR9z4yNYvQ1WqKG8DG8KROXltll3sTrKbl5zJhn660es/1ZnR6nvwt6xnSTl/mNHMjk", "fv1bs4rJ/py3qPxicdoSIn/KyojUcgHVF38fuAy2CQTdjVG5fWj9iz+mQvLm3+qsIYQdFwIDAQAB", "o4G/MIG8MAkGA1UdEwQCMAAwHQYDVR0OBBYEFEMMWLWQi/g83PzlHYqAVnty5L7HMIGPBgNVHREE", "gYcwgYSCCWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWluggpsb2NhbGhvc3Q0ghdsb2Nh", "bGhvc3Q0LmxvY2FsZG9tYWluNIIKbG9jYWxob3N0NoIXbG9jYWxob3N0Ni5sb2NhbGRvbWFpbjaH", "BH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAMjGGXT8Nt1tbl2GkiKt", "miuGE2Ej66YuZ37WSJViaRNDVHLlg87TCcHek2rdO+6sFqQbbzEfwQ05T7xGmVu7tm54HwKMRugo", "Q3wct0bQC5wEWYN+oMDvSyO6M28mZwWb4VtR2IRyWP+ve5DHwTM9mxWa6rBlGzsQqH6YkJpZojzq", "k/mQTug+Y8aEmVoqRIPMHq9ob+S9qd5lp09+MtYpwPfTPx/NN+xMEooXWW/ARfpGhWPkg/FuCu4z", "1tFmCqHgNcWirzMm3dQpF78muE9ng6OB2MXQwL4VgnVkxmlZNHbkR2v/t8MyZJxCy4g6cTMM3S/U", "Mt5/+aIB2JAuMKyuD+A=");
    // Expected Base64 body of the second encryption certificate.
    final String expectedCertificateThree = joinCertificateLines("MIID1zCCAr+gAwIBAgIJALnUl/KSS74pMA0GCSqGSIb3DQEBCwUAMEoxDDAKBgNVBAoTA29yZzEW", "MBQGA1UECxMNZWxhc3RpY3NlYXJjaDEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVu", "dDAeFw0xNTA5MjMxODUyNTVaFw0xOTA5MjIxODUyNTVaMEoxDDAKBgNVBAoTA29yZzEWMBQGA1UE", "CxMNZWxhc3RpY3NlYXJjaDEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDCCASIw", "DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMKm+P6vDAff0c6BWKGdhnYoNl9HijLIgfU3d9CQ", "cqKtwT+yUW3DPSVjIfaLmDIGj6Hl8jTHWPB7ZP4fzhrPi6m4qlRGclJMECBuNASZFiPDtEDv3mso", "eqOKQet6n7PZvgpWM7hxYZO4P1aMKJtRsFAdvBAdZUnv0spR5G4UZTHzSKmMeanIKFkLaD0XVKiL", "Qu9/z9M6roDQeAEoCJ/8JsanG8ih2ymfPHIZuNyYIOrVekHN2zU6bnVn8/PCeZSjS6h5xYw+Jl5g", "zGI/n+F5CZ+THoH8pM4pGp6xRVzpiH12gvERGwgSIDXdn/+uZZj+4lE7n2ENRSOt5KcOGG99r60C", "AwEAAaOBvzCBvDAJBgNVHRMEAjAAMB0GA1UdDgQWBBSSFhBXNp7AaNrHdlgCV0mCEzt7ajCBjwYD", "VR0RBIGHMIGEgglsb2NhbGhvc3SCFWxvY2FsaG9zdC5sb2NhbGRvbWFpboIKbG9jYWxob3N0NIIX", "bG9jYWxob3N0NC5sb2NhbGRvbWFpbjSCCmxvY2FsaG9zdDaCF2xvY2FsaG9zdDYubG9jYWxkb21h", "aW42hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEBCwUAA4IBAQANvAkddfLxn4/B", "CY4LY/1ET3d7ZRldjFTyjjHRYJ3CYBXWVahMskLxIcFNca8YjKfXoX8mcK+NQK/dAbGHXqk76yMl", "krKjh1OQiZ1YAX5ryYerGrZ99N3E9wnbn72bW3iumoLlqmTWlHEpMI0Ql6J75BQLTgKHxCPupVA5", "sTbWkKwGjXXAi84rUlzhDJOR8jk3/7ct0iZO8Hk6AWMcNix5Wka3IDGUXuEVevYRlxgVyCxcnZWC", "7JWREpar5aIPQFkY6VCEglxwUyXbHZw5T/u6XaKKnS7gz8RiwRh68ddSQJeEHi5e4onUD7bOCJgf", "siUwdiCkDbfN9Yum8OIpmBRs");
    // Full expected metadata document; %(...) placeholders are filled below.
    final String expectedXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<md:EntityDescriptor xmlns:md=\"urn:oasis:names:tc:SAML:2.0:metadata\" entityID=\"https://kibana.apps.hydra/\">" + "  <md:SPSSODescriptor AuthnRequestsSigned=\"true\"" + "      WantAssertionsSigned=\"true\"" + "      protocolSupportEnumeration=\"urn:oasis:names:tc:SAML:2.0:protocol\">" + "    <md:KeyDescriptor use=\"signing\">" + "      <ds:KeyInfo xmlns:ds=\"http://www.w3.org/2000/09/xmldsig#\">" + "        <ds:X509Data>" + "          <ds:X509Certificate>%(expectedCertificateOne)</ds:X509Certificate>" + "        </ds:X509Data>" + "      </ds:KeyInfo>" + "    </md:KeyDescriptor>" + "    <md:KeyDescriptor use=\"encryption\">" + "      <ds:KeyInfo xmlns:ds=\"http://www.w3.org/2000/09/xmldsig#\">" + "        <ds:X509Data>" + "          <ds:X509Certificate>%(expectedCertificateTwo)</ds:X509Certificate>" + "        </ds:X509Data>" + "      </ds:KeyInfo>" + "    </md:KeyDescriptor>" + "    <md:KeyDescriptor use=\"encryption\">" + "      <ds:KeyInfo xmlns:ds=\"http://www.w3.org/2000/09/xmldsig#\">" + "       <ds:X509Data>" + "         <ds:X509Certificate>%(expectedCertificateThree)</ds:X509Certificate>" + "       </ds:X509Data>" + "      </ds:KeyInfo>" + "    </md:KeyDescriptor>" + "    <md:SingleLogoutService" + "        Binding=\"urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect\"" + "        Location=\"https://kibana.apps.hydra/saml/logout\"/>" + "    <md:NameIDFormat>urn:oasis:names:tc:SAML:2.0:nameid-format:persistent</md:NameIDFormat>" + "    <md:AssertionConsumerService" + "        Binding=\"urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST\"" + "        Location=\"https://kibana.apps.hydra/saml/acs\"" + "        index=\"1\"" + "        isDefault=\"true\"/>" + "    <md:AttributeConsumingService index=\"1\" isDefault=\"true\">" + "      <md:ServiceName xml:lang=\"en-US\">Hydra Kibana</md:ServiceName>" + "      <md:RequestedAttribute" + "          FriendlyName=\"uid\"" + "          
Name=\"urn:oid:0.9.2342.19200300.100.1.1\"" + "          NameFormat=\"urn:oasis:names:tc:SAML:2.0:attrname-format:uri\"/>" + "      <md:RequestedAttribute" + "          FriendlyName=\"mail\"" + "          Name=\"urn:oid:0.9.2342.19200300.100.1.3\"" + "          NameFormat=\"urn:oasis:names:tc:SAML:2.0:attrname-format:uri\"/>" + "      <md:RequestedAttribute" + "          FriendlyName=\"groups\"" + "          Name=\"urn:oid:1.3.6.1.4.1.5923.1.5.1.1\"" + "          NameFormat=\"urn:oasis:names:tc:SAML:2.0:attrname-format:uri\"/>" + "      <md:RequestedAttribute" + "          Name=\"urn:oid:2.16.840.1.113730.3.1.241\"" + "          NameFormat=\"urn:oasis:names:tc:SAML:2.0:attrname-format:uri\"/>" + "      <md:RequestedAttribute" + "          Name=\"urn:oid:1.3.6.1.4.1.5923.1.1.1.6\"" + "          NameFormat=\"urn:oasis:names:tc:SAML:2.0:attrname-format:uri\"/>" + "    </md:AttributeConsumingService>" + "  </md:SPSSODescriptor>" + "  <md:Organization>" + "    <md:OrganizationName xml:lang=\"en-US\">Hydra</md:OrganizationName>" + "      <md:OrganizationDisplayName xml:lang=\"en-US\">Hydra</md:OrganizationDisplayName>" + "    <md:OrganizationURL xml:lang=\"en-US\">https://hail.hydra/</md:OrganizationURL>" + "  </md:Organization>" + "  <md:ContactPerson contactType=\"administrative\">" + "    <md:GivenName>Wolfgang</md:GivenName>" + "    <md:SurName>von Strucker</md:SurName>" + "    <md:EmailAddress>baron.strucker@supreme.hydra</md:EmailAddress>" + "  </md:ContactPerson>" + "  <md:ContactPerson contactType=\"technical\">" + "    <md:GivenName>Paul</md:GivenName>" + "    <md:SurName>Ebersol</md:SurName>" + "    <md:EmailAddress>pne@tech.hydra</md:EmailAddress>" + "  </md:ContactPerson>" + "</md:EntityDescriptor>";
    // Substitute the three certificate bodies into the template, then compare
    // whitespace-normalised documents and validate against the SAML metadata schema.
    final Map<String, Object> replacements = new HashMap<>();
    replacements.put("expectedCertificateOne", expectedCertificateOne);
    replacements.put("expectedCertificateTwo", expectedCertificateTwo);
    replacements.put("expectedCertificateThree", expectedCertificateThree);
    final String expectedXmlWithCertificate = NamedFormatter.format(expectedXml, replacements);
    assertThat(xml, equalTo(normaliseXml(expectedXmlWithCertificate)));
    assertValidXml(xml);
}
283888.091130elasticsearch
/**
 * Verifies {@link IndicesAccessControl#limitIndicesAccessControl}: the intersection is
 * granted only when both operands are granted, index permissions survive only when present
 * on both sides, and field/document level security definitions combine restrictively.
 */
public void testLimitedIndicesAccessControl() {
    // Without index permissions on either side the intersection is always empty, and it
    // is granted exactly when both operands are granted.
    for (boolean scopeGranted : new boolean[] { true, false }) {
        for (boolean limitGranted : new boolean[] { true, false }) {
            IndicesAccessControl emptyScope = new IndicesAccessControl(scopeGranted, Collections.emptyMap());
            IndicesAccessControl emptyLimit = new IndicesAccessControl(limitGranted, Collections.emptyMap());
            IndicesAccessControl intersection = emptyScope.limitIndicesAccessControl(emptyLimit);
            assertEmptyAccessControl(intersection);
            assertThat(intersection.isGranted(), is(scopeGranted && limitGranted));
        }
    }

    // Index permissions present on only one side do not survive the intersection.
    IndicesAccessControl scope = new IndicesAccessControl(
        true,
        Collections.singletonMap("_index", new IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.allowAll()))
    );
    IndicesAccessControl limit = new IndicesAccessControl(true, Collections.emptyMap());
    IndicesAccessControl limited = scope.limitIndicesAccessControl(limit);
    assertEmptyAccessControl(limited);

    // Identical unrestricted permissions on both sides remain unrestricted.
    scope = new IndicesAccessControl(
        true,
        Collections.singletonMap("_index", new IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.allowAll()))
    );
    limit = new IndicesAccessControl(
        true,
        Collections.singletonMap("_index", new IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.allowAll()))
    );
    limited = scope.limitIndicesAccessControl(limit);
    assertThat(limited, is(notNullValue()));
    assertThat(limited.getIndexPermissions("_index"), is(notNullValue()));
    assertThat(limited.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity(), is(false));
    assertThat(limited.getIndexPermissions("_index").getDocumentPermissions().hasDocumentLevelPermissions(), is(false));
    assertThat(limited.getIndicesWithFieldOrDocumentLevelSecurity(), emptyIterable());
    assertThat(limited.getIndicesWithFieldLevelSecurity(), emptyIterable());
    assertThat(limited.getIndicesWithDocumentLevelSecurity(), emptyIterable());

    // Field level security: only fields granted by BOTH definitions stay accessible.
    final FieldPermissions scopeFls = new FieldPermissions(
        new FieldPermissionsDefinition(new String[] { "f1", "f2", "f3*" }, new String[] { "f3" })
    );
    final FieldPermissions limitFls = new FieldPermissions(
        new FieldPermissionsDefinition(new String[] { "f1", "f3*", "f4" }, new String[] { "f3" })
    );
    scope = new IndicesAccessControl(
        true,
        Map.ofEntries(
            Map.entry("_index", new IndexAccessControl(scopeFls, DocumentPermissions.allowAll())),
            Map.entry("another-index", new IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.allowAll()))
        )
    );
    limit = new IndicesAccessControl(
        true,
        Map.ofEntries(
            Map.entry("_index", new IndexAccessControl(limitFls, DocumentPermissions.allowAll())),
            Map.entry("another-index", new IndexAccessControl(limitFls, DocumentPermissions.allowAll()))
        )
    );
    limited = scope.limitIndicesAccessControl(limit);
    assertThat(limited, is(notNullValue()));
    assertThat(limited.getIndexPermissions("_index"), is(notNullValue()));
    assertThat(limited.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity(), is(true));
    assertThat(limited.getIndexPermissions("_index").getDocumentPermissions().hasDocumentLevelPermissions(), is(false));
    assertThat(limited.getIndicesWithFieldOrDocumentLevelSecurity(), containsInAnyOrder("_index", "another-index"));
    assertThat(limited.getIndicesWithFieldLevelSecurity(), containsInAnyOrder("_index", "another-index"));
    assertThat(limited.getIndicesWithDocumentLevelSecurity(), emptyIterable());
    FieldPermissions mergedFls = limited.getIndexPermissions("_index").getFieldPermissions();
    assertThat(mergedFls.grantsAccessTo("f1"), is(true));
    assertThat(mergedFls.grantsAccessTo("f2"), is(false));
    assertThat(mergedFls.grantsAccessTo("f3"), is(false));
    assertThat(mergedFls.grantsAccessTo("f31"), is(true));
    assertThat(mergedFls.grantsAccessTo("f4"), is(false));

    // Document level security: a query from either side restricts the intersection.
    Set<BytesReference> queries = Collections.singleton(new BytesArray("{\"match_all\" : {}}"));
    final DocumentPermissions limitDls = DocumentPermissions.filteredBy(queries);
    assertThat(limitDls, is(notNullValue()));
    assertThat(limitDls.hasDocumentLevelPermissions(), is(true));
    assertThat(limitDls.getSingleSetOfQueries(), equalTo(queries));
    final DocumentPermissions scopeDls = DocumentPermissions.filteredBy(Set.of(new BytesArray("{\"term\":{ \"public\":true } }")));
    scope = new IndicesAccessControl(
        true,
        Map.ofEntries(
            Map.entry("_index", new IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.allowAll())),
            Map.entry("another-index", new IndexAccessControl(FieldPermissions.DEFAULT, scopeDls))
        )
    );
    limit = new IndicesAccessControl(
        true,
        Map.ofEntries(
            Map.entry("_index", new IndexAccessControl(FieldPermissions.DEFAULT, limitDls)),
            Map.entry("another-index", new IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.allowAll()))
        )
    );
    limited = scope.limitIndicesAccessControl(limit);
    assertThat(limited, is(notNullValue()));
    assertThat(limited.getIndexPermissions("_index"), is(notNullValue()));
    assertThat(limited.getIndexPermissions("_index").getFieldPermissions().hasFieldLevelSecurity(), is(false));
    assertThat(limited.getIndexPermissions("_index").getDocumentPermissions().hasDocumentLevelPermissions(), is(true));
    assertThat(limited.getIndexPermissions("_index").getDocumentPermissions().getSingleSetOfQueries(), equalTo(queries));
    assertThat(limited.getIndicesWithFieldOrDocumentLevelSecurity(), containsInAnyOrder("_index", "another-index"));
    assertThat(limited.getIndicesWithFieldLevelSecurity(), emptyIterable());
    assertThat(limited.getIndicesWithDocumentLevelSecurity(), containsInAnyOrder("_index", "another-index"));
}

/** Asserts the control is non-null, has no "_index" permissions, and carries no FLS/DLS. */
private static void assertEmptyAccessControl(IndicesAccessControl control) {
    assertThat(control, is(notNullValue()));
    assertThat(control.getIndexPermissions("_index"), is(nullValue()));
    assertThat(control.getIndicesWithFieldOrDocumentLevelSecurity(), emptyIterable());
    assertThat(control.getIndicesWithFieldLevelSecurity(), emptyIterable());
    assertThat(control.getIndicesWithDocumentLevelSecurity(), emptyIterable());
}
281115.839117elasticsearch
/**
 * Resolves the JDBC {@link SQLType} corresponding to the given ES-SQL {@link DataType}.
 * Geometry and interval types map onto the driver's {@code ExtTypes} extensions rather
 * than standard JDBC types. Returns {@code null} for types with no JDBC counterpart.
 */
public static SQLType sqlType(DataType dataType) {
    // Special / non-value types.
    if (dataType == UNSUPPORTED) {
        return JDBCType.OTHER;
    }
    if (dataType == NULL) {
        return JDBCType.NULL;
    }
    if (dataType == BOOLEAN) {
        return JDBCType.BOOLEAN;
    }
    // Integral numerics, narrowest to widest.
    if (dataType == BYTE) {
        return JDBCType.TINYINT;
    }
    if (dataType == SHORT) {
        return JDBCType.SMALLINT;
    }
    if (dataType == INTEGER) {
        return JDBCType.INTEGER;
    }
    if (dataType == LONG) {
        return JDBCType.BIGINT;
    }
    if (dataType == UNSIGNED_LONG) {
        return JDBCType.NUMERIC;
    }
    // Floating point. Note the naming shuffle: ES FLOAT maps to JDBC REAL,
    // while ES HALF_FLOAT maps to JDBC FLOAT.
    if (dataType == DOUBLE) {
        return JDBCType.DOUBLE;
    }
    if (dataType == FLOAT) {
        return JDBCType.REAL;
    }
    if (dataType == HALF_FLOAT) {
        return JDBCType.FLOAT;
    }
    if (dataType == SCALED_FLOAT) {
        return JDBCType.DOUBLE;
    }
    // String-like types.
    if (dataType == KEYWORD || dataType == TEXT) {
        return JDBCType.VARCHAR;
    }
    if (isDateTime(dataType)) {
        return JDBCType.TIMESTAMP;
    }
    if (dataType == IP || dataType == VERSION) {
        return JDBCType.VARCHAR;
    }
    if (dataType == BINARY) {
        return JDBCType.BINARY;
    }
    // Hierarchical types surface as JDBC structs.
    if (dataType == OBJECT || dataType == NESTED) {
        return JDBCType.STRUCT;
    }
    if (dataType == DATE) {
        return JDBCType.DATE;
    }
    if (dataType == TIME) {
        return JDBCType.TIME;
    }
    // All spatial types share the extended GEOMETRY type.
    if (dataType == GEO_SHAPE || dataType == GEO_POINT || dataType == SHAPE) {
        return ExtTypes.GEOMETRY;
    }
    // Interval types map one-to-one onto the driver's extended interval types.
    if (dataType == INTERVAL_YEAR) {
        return ExtTypes.INTERVAL_YEAR;
    }
    if (dataType == INTERVAL_MONTH) {
        return ExtTypes.INTERVAL_MONTH;
    }
    if (dataType == INTERVAL_DAY) {
        return ExtTypes.INTERVAL_DAY;
    }
    if (dataType == INTERVAL_HOUR) {
        return ExtTypes.INTERVAL_HOUR;
    }
    if (dataType == INTERVAL_MINUTE) {
        return ExtTypes.INTERVAL_MINUTE;
    }
    if (dataType == INTERVAL_SECOND) {
        return ExtTypes.INTERVAL_SECOND;
    }
    if (dataType == INTERVAL_YEAR_TO_MONTH) {
        return ExtTypes.INTERVAL_YEAR_TO_MONTH;
    }
    if (dataType == INTERVAL_DAY_TO_HOUR) {
        return ExtTypes.INTERVAL_DAY_TO_HOUR;
    }
    if (dataType == INTERVAL_DAY_TO_MINUTE) {
        return ExtTypes.INTERVAL_DAY_TO_MINUTE;
    }
    if (dataType == INTERVAL_DAY_TO_SECOND) {
        return ExtTypes.INTERVAL_DAY_TO_SECOND;
    }
    if (dataType == INTERVAL_HOUR_TO_MINUTE) {
        return ExtTypes.INTERVAL_HOUR_TO_MINUTE;
    }
    if (dataType == INTERVAL_HOUR_TO_SECOND) {
        return ExtTypes.INTERVAL_HOUR_TO_SECOND;
    }
    if (dataType == INTERVAL_MINUTE_TO_SECOND) {
        return ExtTypes.INTERVAL_MINUTE_TO_SECOND;
    }
    // No JDBC equivalent.
    return null;
}
28953.9239117elasticsearch
/**
 * Returns the default precision reported for the given ES-SQL {@link DataType}.
 * Numeric types report a count of decimal digits; several non-numeric types fall
 * back to the type's byte size. Unknown types yield 0.
 */
public static int defaultPrecision(DataType dataType) {
    // Special / non-value types fall back to the type's size.
    if (dataType == UNSUPPORTED || dataType == NULL || dataType == BOOLEAN) {
        return dataType.size();
    }
    // Integral numerics.
    if (dataType == BYTE) {
        return 3;
    }
    if (dataType == SHORT) {
        return 5;
    }
    if (dataType == INTEGER) {
        return 10;
    }
    if (dataType == LONG) {
        return 19;
    }
    if (dataType == UNSIGNED_LONG) {
        return 20;
    }
    // Floating-point numerics.
    if (dataType == DOUBLE) {
        return 15;
    }
    if (dataType == FLOAT) {
        return 7;
    }
    if (dataType == HALF_FLOAT) {
        return 3;
    }
    if (dataType == SCALED_FLOAT) {
        return 15;
    }
    // String-like types.
    if (dataType == KEYWORD) {
        return 15;
    }
    if (dataType == TEXT) {
        return 32766;
    }
    if (isDateTime(dataType)) {
        return 9;
    }
    // These types report their byte size as precision.
    if (dataType == IP || dataType == VERSION || dataType == BINARY || dataType == OBJECT || dataType == NESTED) {
        return dataType.size();
    }
    if (dataType == DATE) {
        return 3;
    }
    if (dataType == TIME) {
        return 9;
    }
    // Spatial types.
    if (dataType == GEO_SHAPE) {
        return dataType.size();
    }
    if (dataType == GEO_POINT) {
        return Integer.MAX_VALUE;
    }
    if (dataType == SHAPE) {
        return dataType.size();
    }
    // Interval types: year/month-based intervals use 7, day-time intervals use 23.
    if (dataType == INTERVAL_YEAR || dataType == INTERVAL_MONTH) {
        return 7;
    }
    if (dataType == INTERVAL_DAY || dataType == INTERVAL_HOUR || dataType == INTERVAL_MINUTE || dataType == INTERVAL_SECOND) {
        return 23;
    }
    if (dataType == INTERVAL_YEAR_TO_MONTH) {
        return 7;
    }
    if (dataType == INTERVAL_DAY_TO_HOUR
        || dataType == INTERVAL_DAY_TO_MINUTE
        || dataType == INTERVAL_DAY_TO_SECOND
        || dataType == INTERVAL_HOUR_TO_MINUTE
        || dataType == INTERVAL_HOUR_TO_SECOND
        || dataType == INTERVAL_MINUTE_TO_SECOND) {
        return 23;
    }
    // Unknown type.
    return 0;
}
282253.8525114elasticsearch
 /**
  * Determines a usable character encoding for the supplied input.
  * Runs ICU4J's {@link CharsetDetector} over the stream, then filters the candidate
  * matches with two heuristics computed from the first {@code BUFFER_SIZE} bytes:
  * whether the sample is pure 7-bit ASCII, and how zero bytes are distributed between
  * even and odd offsets (used to reason about wide-character encodings such as UTF-16).
  *
  * @param explanation accumulates human-readable notes on why encodings were chosen or rejected
  * @param inputStream the input to sniff; wrapped in a {@link BufferedInputStream} if it
  *                    does not support mark/reset (required so the stream can be rewound)
  * @param timeoutChecker used to abort if detection takes too long
  * @return the accepted {@link CharsetMatch}
  * @throws IllegalArgumentException if no supported encoding matches the input
  */
 CharsetMatch findCharset(List<String> explanation, InputStream inputStream, TimeoutChecker timeoutChecker) throws Exception {
    // The detector and the manual scan below both need to rewind the stream.
    if (inputStream.markSupported() == false) {
        inputStream = new BufferedInputStream(inputStream, BUFFER_SIZE);
    }
    CharsetDetector charsetDetector = new CharsetDetector().setText(inputStream);
    CharsetMatch[] charsetMatches = charsetDetector.detectAll();
    timeoutChecker.check("character set detection");
    // Manually scan up to BUFFER_SIZE bytes, tracking pure-ASCII-ness and zero-byte
    // counts at even vs odd offsets.
    boolean pureAscii = true;
    int evenPosZeroCount = 0;
    int oddPosZeroCount = 0;
    inputStream.mark(BUFFER_SIZE);
    byte[] workspace = new byte[BUFFER_SIZE];
    int remainingLength = BUFFER_SIZE;
    do {
        // NOTE(review): each read writes at offset 0, so if a read returns an odd byte
        // count the even/odd parity of the next chunk is relative to the chunk, not the
        // stream position — presumably acceptable for this heuristic; confirm.
        int bytesRead = inputStream.read(workspace, 0, remainingLength);
        if (bytesRead <= 0) {
            break;
        }
        for (int i = 0; i < bytesRead; ++i) {
            if (workspace[i] == 0) {
                pureAscii = false;
                if (i % 2 == 0) {
                    ++evenPosZeroCount;
                } else {
                    ++oddPosZeroCount;
                }
            } else {
                // Java bytes are signed, so "< 128" is always true here; "> 0" alone
                // already restricts the byte to the 7-bit ASCII range.
                pureAscii = pureAscii && workspace[i] > 0 && workspace[i] < 128;
            }
        }
        remainingLength -= bytesRead;
    } while (remainingLength > 0);
    inputStream.reset();
    boolean containsZeroBytes = evenPosZeroCount > 0 || oddPosZeroCount > 0;
    timeoutChecker.check("character set detection");
    // Pure ASCII is valid UTF-8, so prefer UTF-8 if the detector considered it at all.
    if (pureAscii) {
        Optional<CharsetMatch> utf8CharsetMatch = Arrays.stream(charsetMatches).filter(charsetMatch -> StandardCharsets.UTF_8.name().equals(charsetMatch.getName())).findFirst();
        if (utf8CharsetMatch.isPresent()) {
            explanation.add("Using character encoding [" + StandardCharsets.UTF_8.name() + "], which matched the input with [" + utf8CharsetMatch.get().getConfidence() + "%] confidence - first [" + (BUFFER_SIZE / 1024) + "kB] of input was pure ASCII");
            return utf8CharsetMatch.get();
        }
    }
    // Walk the detector's candidates, rejecting any that downstream tooling or the JVM
    // cannot handle, or that are inconsistent with the zero-byte evidence.
    for (CharsetMatch charsetMatch : charsetMatches) {
        String name = charsetMatch.getName();
        if (Charset.isSupported(name) && FILEBEAT_SUPPORTED_ENCODINGS.contains(name.toLowerCase(Locale.ROOT))) {
            // Would this encoding represent a space character using any zero bytes
            // (as wide-character encodings like UTF-16 do)?
            boolean spaceEncodingContainsZeroByte = false;
            Charset charset = Charset.forName(name);
            if (charset.canEncode()) {
                byte[] spaceBytes = " ".getBytes(charset);
                for (int i = 0; i < spaceBytes.length && spaceEncodingContainsZeroByte == false; ++i) {
                    spaceEncodingContainsZeroByte = (spaceBytes[i] == 0);
                }
            }
            if (containsZeroBytes && spaceEncodingContainsZeroByte == false) {
                // Input contains zero bytes but this encoding would never produce them: reject.
                explanation.add("Character encoding [" + name + "] matched the input with [" + charsetMatch.getConfidence() + "%] confidence but was rejected as the input contains zero bytes and the [" + name + "] encoding does not");
            } else if (containsZeroBytes && 3 * oddPosZeroCount > 2 * evenPosZeroCount && 3 * evenPosZeroCount > 2 * oddPosZeroCount) {
                // Zero bytes are spread too evenly between even and odd offsets to trust
                // the match (within a 2:3 ratio either way): reject.
                explanation.add("Character encoding [" + name + "] matched the input with [" + charsetMatch.getConfidence() + "%] confidence but was rejected as the distribution of zero bytes between odd and even positions in the " + "text is very close - [" + evenPosZeroCount + "] and [" + oddPosZeroCount + "] in the first [" + (BUFFER_SIZE / 1024) + "kB] of input");
            } else {
                explanation.add("Using character encoding [" + name + "], which matched the input with [" + charsetMatch.getConfidence() + "%] confidence");
                return charsetMatch;
            }
        } else {
            explanation.add("Character encoding [" + name + "] matched the input with [" + charsetMatch.getConfidence() + "%] confidence but was rejected as it is not supported by [" + (Charset.isSupported(name) ? "Filebeat" : "the JVM") + "]");
        }
    }
    throw new IllegalArgumentException("Could not determine a usable character encoding for the input" + (containsZeroBytes ? " - could it be binary data?" : ""));
}
283235.893783elasticsearch
/**
 * Builds the _cat table for transforms: one row per transform configuration, joined
 * with that transform's runtime stats (matched by transform id).
 *
 * @param response      the transform configurations to render
 * @param statsResponse the corresponding per-transform stats
 * @return the populated table
 */
private static Table buildTable(GetTransformAction.Response response, GetTransformStatsAction.Response statsResponse) {
    Table table = getTableWithHeader();
    // Index stats by transform id for O(1) lookup while iterating the configs.
    Map<String, TransformStats> statsById = statsResponse.getTransformsStats().stream().collect(Collectors.toMap(TransformStats::getId, Function.identity()));
    response.getTransformConfigurations().forEach(config -> {
        TransformStats stats = statsById.get(config.getId());
        TransformCheckpointingInfo checkpointingInfo = null;
        TransformIndexerStats transformIndexerStats = null;
        // Stats may be absent for a transform; stat-derived cells then render as null.
        if (stats != null) {
            checkpointingInfo = stats.getCheckpointingInfo();
            transformIndexerStats = stats.getIndexerStats();
        }
        // Max page search size precedence: settings override the (legacy) pivot config
        // value, which overrides the default.
        Integer maxPageSearchSize = config.getSettings() == null || config.getSettings().getMaxPageSearchSize() == null ? config.getPivotConfig() == null || config.getPivotConfig().getMaxPageSearchSize() == null ? Transform.DEFAULT_INITIAL_MAX_PAGE_SEARCH_SIZE : config.getPivotConfig().getMaxPageSearchSize() : config.getSettings().getMaxPageSearchSize();
        // Percent-complete of the next checkpoint, when progress info is available.
        Double progress = checkpointingInfo == null ? null : checkpointingInfo.getNext().getCheckpointProgress() == null ? null : checkpointingInfo.getNext().getCheckpointProgress().getPercentComplete();
        // NOTE: the cell order below must stay in sync with getTableWithHeader().
        table.startRow().addCell(config.getId()).addCell(stats == null ? null : stats.getState().toString()).addCell(checkpointingInfo == null ? null : checkpointingInfo.getLast().getCheckpoint()).addCell(transformIndexerStats == null ? null : transformIndexerStats.getNumDocuments()).addCell(progress == null ? null : String.format(Locale.ROOT, "%.2f", progress)).addCell(checkpointingInfo == null ? null : checkpointingInfo.getLastSearchTime() == null ? null : Date.from(checkpointingInfo.getLastSearchTime())).addCell(checkpointingInfo == null ? null : checkpointingInfo.getChangesLastDetectedAt() == null ? null : Date.from(checkpointingInfo.getChangesLastDetectedAt())).addCell(config.getCreateTime() == null ? null : Date.from(config.getCreateTime())).addCell(config.getVersion()).addCell(String.join(",", config.getSource().getIndex())).addCell(config.getDestination().getIndex()).addCell(config.getDestination().getPipeline()).addCell(config.getDescription()).addCell(config.getSyncConfig() == null ? "batch" : "continuous").addCell(config.getFrequency() == null ? Transform.DEFAULT_TRANSFORM_FREQUENCY : config.getFrequency()).addCell(maxPageSearchSize).addCell(config.getSettings() == null || config.getSettings().getDocsPerSecond() == null ? "-" : config.getSettings().getDocsPerSecond()).addCell(stats == null ? null : stats.getReason()).addCell(transformIndexerStats == null ? null : transformIndexerStats.getSearchTotal()).addCell(transformIndexerStats == null ? null : transformIndexerStats.getSearchFailures()).addCell(transformIndexerStats == null ? null : TimeValue.timeValueMillis(transformIndexerStats.getSearchTime())).addCell(transformIndexerStats == null ? null : transformIndexerStats.getIndexTotal()).addCell(transformIndexerStats == null ? null : transformIndexerStats.getIndexFailures()).addCell(transformIndexerStats == null ? null : TimeValue.timeValueMillis(transformIndexerStats.getIndexTime())).addCell(transformIndexerStats == null ? 
null : transformIndexerStats.getOutputDocuments()).addCell(transformIndexerStats == null ? null : TimeValue.timeValueMillis(transformIndexerStats.getDeleteTime())).addCell(transformIndexerStats == null ? null : transformIndexerStats.getNumDeletedDocuments()).addCell(transformIndexerStats == null ? null : transformIndexerStats.getNumInvocations()).addCell(transformIndexerStats == null ? null : transformIndexerStats.getNumPages()).addCell(transformIndexerStats == null ? null : TimeValue.timeValueMillis(transformIndexerStats.getProcessingTime())).addCell(transformIndexerStats == null ? null : String.format(Locale.ROOT, "%.2f", transformIndexerStats.getExpAvgCheckpointDurationMs())).addCell(transformIndexerStats == null ? null : String.format(Locale.ROOT, "%.2f", transformIndexerStats.getExpAvgDocumentsIndexed())).addCell(transformIndexerStats == null ? null : String.format(Locale.ROOT, "%.2f", transformIndexerStats.getExpAvgDocumentsProcessed())).endRow();
    });
    return table;
}
282886.3221104elasticsearch
/**
 * Builds a randomized email account configuration while accumulating the expected
 * parsed values (profile, email defaults, SMTP properties), then asserts that
 * {@code Account.Config} parses the settings into exactly those values.
 * NOTE: expected values are recorded alongside each random draw, so the order of
 * random calls below is significant — do not reorder.
 */
public void testConfig() throws Exception {
    String accountName = "_name";
    Settings.Builder builder = Settings.builder();
    // Profile: STANDARD is the default, so it is only written to settings when a
    // different profile was drawn.
    Profile profile = rarely() ? Profile.STANDARD : randomFrom(Profile.values());
    if (profile != Profile.STANDARD) {
        builder.put("profile", profile.name());
    }
    // Randomly populate a subset of the email default fields and remember the
    // resulting EmailDefaults for the assertion below.
    Account.Config.EmailDefaults emailDefaults;
    if (randomBoolean()) {
        Settings.Builder sb = Settings.builder();
        if (randomBoolean()) {
            sb.put(Email.Field.FROM.getPreferredName(), "from@domain");
        }
        if (randomBoolean()) {
            sb.put(Email.Field.REPLY_TO.getPreferredName(), "replyto@domain");
        }
        if (randomBoolean()) {
            sb.put(Email.Field.PRIORITY.getPreferredName(), randomFrom(Email.Priority.values()));
        }
        if (randomBoolean()) {
            sb.put(Email.Field.TO.getPreferredName(), "to@domain");
        }
        if (randomBoolean()) {
            sb.put(Email.Field.CC.getPreferredName(), "cc@domain");
        }
        if (randomBoolean()) {
            sb.put(Email.Field.BCC.getPreferredName(), "bcc@domain");
        }
        if (randomBoolean()) {
            sb.put(Email.Field.SUBJECT.getPreferredName(), "_subject");
        }
        Settings settings = sb.build();
        emailDefaults = new Account.Config.EmailDefaults(accountName, settings);
        // Mirror the chosen defaults into the account settings under "email_defaults.".
        for (String name : settings.names()) {
            builder.put("email_defaults." + name, settings.get(name));
        }
    } else {
        emailDefaults = new Account.Config.EmailDefaults(accountName, Settings.EMPTY);
    }
    // smtpProps accumulates the JavaMail properties we expect Config to produce.
    Properties smtpProps = new Properties();
    Settings.Builder smtpBuilder = Settings.builder();
    String host = "somehost";
    // The host may be configured under several setting names; "local_address" is
    // normalized to the JavaMail property name "localaddress".
    String setting = randomFrom("host", "localaddress", "local_address");
    smtpBuilder.put(setting, host);
    if (setting.equals("local_address")) {
        setting = "localaddress";
    }
    smtpProps.put("mail.smtp." + setting, host);
    String user = null;
    if (randomBoolean()) {
        user = randomAlphaOfLength(5);
        setting = randomFrom("user", "from");
        smtpBuilder.put(setting, user);
        smtpProps.put("mail.smtp." + setting, user);
    }
    // Port defaults to 25 unless explicitly configured; "local_port" is normalized
    // to the JavaMail property name "localport".
    int port = 25;
    if (randomBoolean()) {
        port = randomIntBetween(2000, 2500);
        setting = randomFrom("port", "localport", "local_port");
        smtpBuilder.put(setting, port);
        if (setting.equals("local_port")) {
            setting = "localport";
        }
        smtpProps.setProperty("mail.smtp." + setting, String.valueOf(port));
    }
    // The password travels via secure settings, not the plain settings builder.
    String password = null;
    if (randomBoolean()) {
        password = randomAlphaOfLength(8);
        final MockSecureSettings secureSettings = new MockSecureSettings();
        secureSettings.setString("smtp." + Account.SECURE_PASSWORD_SETTING.getKey(), password);
        builder.setSecureSettings(secureSettings);
    }
    // Arbitrary extra smtp settings must be passed through verbatim.
    for (int i = 0; i < 5; i++) {
        String name = randomAlphaOfLength(5);
        String value = randomAlphaOfLength(6);
        smtpProps.put("mail.smtp." + name, value);
        smtpBuilder.put(name, value);
    }
    // The default timeouts are always expected in the resulting properties, with the
    // underscores stripped from the property names.
    for (String name : new String[] { "connection_timeout", "write_timeout", "timeout" }) {
        String propertyName = name.replaceAll("_", "");
        smtpProps.put("mail.smtp." + propertyName, String.valueOf(TimeValue.parseTimeValue(Account.DEFAULT_SMTP_TIMEOUT_SETTINGS.get(name), name).millis()));
    }
    // Nest all smtp settings under the "smtp." prefix of the account settings.
    Settings smtpSettings = smtpBuilder.build();
    for (String name : smtpSettings.names()) {
        builder.put("smtp." + name, smtpSettings.get(name));
    }
    // Parse and verify every accumulated expectation.
    Settings settings = builder.build();
    Account.Config config = new Account.Config(accountName, settings, null, logger);
    assertThat(config.profile, is(profile));
    assertThat(config.defaults, equalTo(emailDefaults));
    assertThat(config.smtp, notNullValue());
    assertThat(config.smtp.port, is(port));
    assertThat(config.smtp.host, is(host));
    assertThat(config.smtp.user, is(user));
    if (password != null) {
        assertThat(config.smtp.password.getChars(), is(password.toCharArray()));
    } else {
        assertThat(config.smtp.password, nullValue());
    }
    assertThat(config.smtp.properties, equalTo(smtpProps));
}
28215.221320gwt
/**
 * Tests that a compiled class's dependency signature changes exactly when its member
 * API changes (members added/removed, a constant's value changed) and stays the same
 * for non-API changes (member order, inner classes, annotations, javadoc).
 *
 * Each variant of {@code test.ClassDependency} is built via {@link #classDependencyResource},
 * which removes the per-variant anonymous-class boilerplate the original code duplicated.
 */
public void testClassDependencySignature() {
    // Baseline: one member of each visibility, plus a public static constant.
    final MockJavaResource CLASS_DEP_ORIG = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // Byte-identical source: signature must be equal.
    final MockJavaResource CLASS_DEP_NO_CHANGE = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // Private members removed.
    final MockJavaResource CLASS_DEP_NO_PRIVATE = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};");
    // Protected field removed.
    final MockJavaResource CLASS_DEP_NO_PROTECTED_FIELD = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // Package-private field removed.
    final MockJavaResource CLASS_DEP_NO_DEFAULT_FIELD = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // Public field removed.
    final MockJavaResource CLASS_DEP_NO_PUBLIC_FIELD = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // The value of the public static constant changed (100 -> 99).
    final MockJavaResource CLASS_DEP_FIELD_VALUE_CHANGE = classDependencyResource(
        "  static public final int fieldPublicStatic = 99;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // Same members, declared in a different order.
    final MockJavaResource CLASS_DEP_ORDER = classDependencyResource(
        "  public int fieldPublic;",
        "  static public final int fieldPublicStatic = 100;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  int methodDefault() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // A nested class is added; its members must not affect the signature.
    final MockJavaResource CLASS_DEP_INNER = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};",
        "  public static class IgnoreMe {",
        "    private int ignoreThisMember;",
        "  }");
    // @Deprecated added to a field.
    final MockJavaResource CLASS_DEP_DEPRECATED_FIELD = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  @Deprecated",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // @Deprecated added to a method.
    final MockJavaResource CLASS_DEP_DEPRECATED_METHOD = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  @Deprecated",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // A user annotation added to a field.
    final MockJavaResource CLASS_DEP_ANNOTATED_FIELD = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  @TestAnnotation(\"Foo\")",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // A user annotation added to a method.
    final MockJavaResource CLASS_DEP_ANNOTATED_METHOD = classDependencyResource(
        "  static public final int fieldPublicStatic = 100;",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  @TestAnnotation(\"Foo\")",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // Javadoc comments added to several members.
    final MockJavaResource CLASS_DEP_JAVADOC = classDependencyResource(
        "  /** a static field */",
        "  static public final int fieldPublicStatic = 100;",
        "  /** a public field */",
        "  public int fieldPublic;",
        "  protected int fieldProtected;",
        "  int fieldDefault;",
        "  private int fieldPrivate;",
        "  /** a public method */",
        "  public int methodPublic() {return 1;};",
        "  protected int methodProtected(String arg) {return 1;};",
        "  int methodDefault() {return 1;};",
        "  private int methodPrivate(){return 1;};");
    // The annotation type used by the annotated variants above.
    final MockJavaResource TEST_ANNOTATION = new MockJavaResource("test.TestAnnotation") {

        @Override
        public CharSequence getContent() {
            StringBuilder code = new StringBuilder();
            code.append("package test;\n");
            code.append("public @interface TestAnnotation {\n");
            code.append("  String value();");
            code.append("}\n");
            return code;
        }
    };
    CompiledClass originalClass = buildClass(CLASS_DEP_ORIG);
    assertNotNull(originalClass);
    // API-preserving changes keep the signature; API changes alter it.
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_NO_CHANGE));
    assertSignaturesNotEqual(originalClass, buildClass(CLASS_DEP_NO_PRIVATE));
    assertSignaturesNotEqual(originalClass, buildClass(CLASS_DEP_NO_PUBLIC_FIELD));
    assertSignaturesNotEqual(originalClass, buildClass(CLASS_DEP_NO_PROTECTED_FIELD));
    assertSignaturesNotEqual(originalClass, buildClass(CLASS_DEP_NO_DEFAULT_FIELD));
    assertSignaturesNotEqual(originalClass, buildClass(CLASS_DEP_FIELD_VALUE_CHANGE));
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_ORDER));
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_INNER));
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_DEPRECATED_FIELD));
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_DEPRECATED_METHOD));
    // The annotation type must be on the source path for the annotated variants.
    oracle.add(TEST_ANNOTATION);
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_ANNOTATED_FIELD));
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_ANNOTATED_METHOD));
    assertSignaturesEqual(originalClass, buildClass(CLASS_DEP_JAVADOC));
}

/**
 * Builds a mock {@code test.ClassDependency} compilation unit whose class body consists
 * of the given lines, each emitted verbatim and newline-terminated — matching the exact
 * source the inlined anonymous resources previously produced.
 */
private MockJavaResource classDependencyResource(final String... memberLines) {
    return new MockJavaResource(CLASS_DEP_TYPE_NAME) {

        @Override
        public CharSequence getContent() {
            StringBuilder code = new StringBuilder();
            code.append("package test;\n");
            code.append("public class ClassDependency {\n");
            for (String line : memberLines) {
                code.append(line).append("\n");
            }
            code.append("}");
            return code;
        }
    };
}
282331.8527112gwt
/**
 * Generates the JavaScript locale-selection snippet used by the module
 * bootstrap. The emitted script tries each configured lookup method in
 * {@code localeSearchOrder} until one produces a locale, falling back to
 * {@code fallback} (or "default") otherwise, and publishes the result in
 * {@code $wnd['__gwt_Locale']}.
 *
 * @param logger logs warnings for invalid configuration values
 * @param possibleValues the set of supported locales (not consulted here;
 *        presumably used by callers/overrides — TODO confirm)
 * @param fallback locale to use when detection fails; may be null
 * @param configProperties module configuration properties to read
 * @return the JavaScript source of the selection function body
 * @throws UnableToCompleteException declared for interface compatibility
 */
public String generate(TreeLogger logger, SortedSet<String> possibleValues, String fallback, SortedSet<ConfigurationProperty> configProperties) throws UnableToCompleteException {
    // Lookup chain used when the configuration supplies none (or an invalid one).
    final String defaultSearchOrder = "queryparam,cookie,meta,useragent";
    String localeQueryParam = null;
    String localeCookie = null;
    boolean localeUserAgent = false;
    boolean localeUseMeta = false;
    String localeSearchOrder = defaultSearchOrder;
    // Pull the locale-related configuration properties, validating the
    // user-supplied names before trusting them in generated JS.
    for (ConfigurationProperty configProp : configProperties) {
        String name = configProp.getName();
        if (LOCALE_QUERYPARAM.equals(name)) {
            localeQueryParam = configProp.getValues().get(0);
            if (localeQueryParam != null && localeQueryParam.length() != 0 && !validateQueryParam(localeQueryParam)) {
                logger.log(TreeLogger.WARN, "Ignoring invalid value of '" + localeQueryParam + "' from '" + LOCALE_QUERYPARAM + "', not a valid query parameter name");
                localeQueryParam = null;
            }
        } else if (LOCALE_COOKIE.equals(name)) {
            localeCookie = configProp.getValues().get(0);
            if (localeCookie != null && localeCookie.length() != 0 && !validateCookieName(localeCookie)) {
                logger.log(TreeLogger.WARN, "Ignoring invalid value of '" + localeCookie + "' from '" + LOCALE_COOKIE + "', not a valid cookie name");
                localeCookie = null;
            }
        } else if (LOCALE_USEMETA.equals(name)) {
            localeUseMeta = isTrue(configProp.getValues().get(0));
        } else if (LOCALE_USERAGENT.equals(name)) {
            localeUserAgent = isTrue(configProp.getValues().get(0));
        } else if (LOCALE_SEARCHORDER.equals(name)) {
            localeSearchOrder = configProp.getValues().get(0);
        }
    }
    // BUG FIX: the null check must precede trim() — the original called
    // localeSearchOrder.trim() first, which NPEs on a null configured value
    // and made the null guard unreachable.
    if (localeSearchOrder == null || localeSearchOrder.trim().length() == 0) {
        localeSearchOrder = defaultSearchOrder;
    } else {
        localeSearchOrder = localeSearchOrder.trim();
    }
    if (fallback == null) {
        fallback = "default";
    }
    // Emit the detection script. 'locale' stays null until some lookup
    // succeeds; 'rtlocale' is what gets published for runtime consumers.
    StringSourceWriter body = new StringSourceWriter();
    body.println("{");
    body.indent();
    body.println("var locale = null;");
    body.println("var rtlocale = '" + fallback + "';");
    body.println("try {");
    // Each enabled lookup is guarded by "if (!locale)" so earlier methods
    // in the search order win.
    for (String method : localeSearchOrder.split(",")) {
        if ("queryparam".equals(method)) {
            if (localeQueryParam != null && localeQueryParam.length() > 0) {
                body.println("if (!locale) {");
                body.indent();
                generateQueryParamLookup(logger, body, localeQueryParam);
                body.outdent();
                body.println("}");
            }
        } else if ("cookie".equals(method)) {
            if (localeCookie != null && localeCookie.length() > 0) {
                body.println("if (!locale) {");
                body.indent();
                generateCookieLookup(logger, body, localeCookie);
                body.outdent();
                body.println("}");
            }
        } else if ("meta".equals(method)) {
            if (localeUseMeta) {
                body.println("if (!locale) {");
                body.indent();
                generateMetaLookup(logger, body);
                body.outdent();
                body.println("}");
            }
        } else if ("useragent".equals(method)) {
            if (localeUserAgent) {
                body.println("if (!locale) {");
                body.indent();
                generateUserAgentLookup(logger, body);
                body.outdent();
                body.println("}");
            }
        } else {
            logger.log(TreeLogger.WARN, "Ignoring unknown locale lookup method \"" + method + "\"");
            body.println("// ignoring invalid lookup method '" + method + "'");
        }
    }
    // Last resort before the fallback: a locale previously published by
    // another module on the same page.
    body.println("if (!locale) {");
    body.indent();
    body.println("locale = $wnd['__gwt_Locale'];");
    body.outdent();
    body.println("}");
    body.println("if (locale) {");
    body.indent();
    body.println("rtlocale = locale;");
    body.outdent();
    body.println("}");
    generateInheritanceLookup(logger, body);
    body.outdent();
    body.println("} catch (e) {");
    body.indent();
    body.println("alert(\"Unexpected exception in locale detection, using " + "default: \" + e);\n");
    body.outdent();
    body.println("}");
    body.println("$wnd['__gwt_Locale'] = rtlocale;");
    body.println("return locale || \"" + fallback + "\";");
    body.outdent();
    body.println("}");
    return body.toString();
}
281758.434098gwt
/**
 * Applies the fields parsed into this object (year, month, dayOfMonth,
 * hours, minutes, seconds, milliseconds, dayOfWeek, era, ampm, tzOffset)
 * onto {@code date}, mutating it in place.
 *
 * @param date the date to populate; mutated by this call
 * @param strict when true, reject inputs that were silently normalized
 *        (e.g. a day-of-month that rolled into the next month)
 * @return false if strict validation fails or the day-of-week conflicts
 *         with an explicit day-of-month; true otherwise
 */
public boolean calcDate(Date date, boolean strict) {
    // Era 0 presumably marks BC/BCE: convert a positive year to the
    // astronomical convention (1 BC -> 0, 2 BC -> -1). TODO confirm era
    // encoding against the field's producer.
    if (this.era == 0 && this.year > 0) {
        this.year = -(this.year - 1);
    }
    // Integer.MIN_VALUE is the "field not set" sentinel for year/tzOffset.
    // java.util.Date years are offset from 1900; JS_START_YEAR rebases.
    if (this.year > Integer.MIN_VALUE) {
        date.setYear(this.year - JS_START_YEAR);
    }
    // Reset the day to 1 before setting the month so a large current day
    // (e.g. 31) can't make setMonth() roll into the following month.
    int orgDayOfMonth = date.getDate();
    date.setDate(1);
    if (this.month >= 0) {
        date.setMonth(this.month);
    }
    if (this.dayOfMonth >= 0) {
        date.setDate(this.dayOfMonth);
    } else if (this.month >= 0) {
        // No explicit day: clamp the original day to the length of the new
        // month. Setting day 35 and seeing what it rolls to yields the
        // month length as 35 - rolledDay.
        Date tmp = new Date(date.getYear(), date.getMonth(), 35);
        int daysInCurrentMonth = 35 - tmp.getDate();
        date.setDate(Math.min(daysInCurrentMonth, orgDayOfMonth));
    } else {
        date.setDate(orgDayOfMonth);
    }
    // Negative hours means "not parsed": keep the date's current hour.
    if (this.hours < 0) {
        this.hours = date.getHours();
    }
    // ampm > 0 presumably means PM: shift a 12-hour value into 24-hour form.
    if (this.ampm > 0) {
        if (this.hours < 12) {
            this.hours += 12;
        }
    }
    // midnightIs24 supports clocks where midnight is written as 24:00.
    date.setHours(this.hours == 24 && this.midnightIs24 ? 0 : this.hours);
    if (this.minutes >= 0) {
        date.setMinutes(this.minutes);
    }
    if (this.seconds >= 0) {
        date.setSeconds(this.seconds);
    }
    if (this.milliseconds >= 0) {
        // Date has no setMilliseconds: truncate to the second, then add.
        date.setTime(date.getTime() / 1000 * 1000 + this.milliseconds);
    }
    if (strict) {
        // Any field that changed during normalization means the input was
        // out of range for its context (e.g. Feb 30) — reject it.
        if ((this.year > Integer.MIN_VALUE) && ((this.year - JS_START_YEAR) != date.getYear())) {
            return false;
        }
        if ((this.month >= 0) && (this.month != date.getMonth())) {
            return false;
        }
        if ((this.dayOfMonth >= 0) && (this.dayOfMonth != date.getDate())) {
            return false;
        }
        // 24:00 is only legal on a midnight-is-24 clock and never with an
        // AM/PM marker; on such a clock 0:00 is the illegal spelling.
        if (this.hours == 24 && this.midnightIs24) {
            if (this.ampm > 0) {
                return false;
            }
        } else if (this.hours >= 24) {
            return false;
        } else if (this.hours == 0 && this.midnightIs24) {
            return false;
        }
        if (this.minutes >= 60) {
            return false;
        }
        if (this.seconds >= 60) {
            return false;
        }
        if (this.milliseconds >= 1000) {
            return false;
        }
    }
    // Two-digit years: resolve into the moving window [now-80, now+20).
    if (this.ambiguousYear) {
        Date defaultCenturyStart = new Date();
        defaultCenturyStart.setYear(defaultCenturyStart.getYear() - 80);
        if (date.before(defaultCenturyStart)) {
            date.setYear(defaultCenturyStart.getYear() + 100);
        }
    }
    if (this.dayOfWeek >= 0) {
        if (this.dayOfMonth == -1) {
            // Only a weekday was given: snap to the nearest matching day
            // (at most 3 days forward, else go backward), staying within
            // the month when possible.
            int adjustment = (7 + this.dayOfWeek - date.getDay()) % 7;
            if (adjustment > 3) {
                adjustment -= 7;
            }
            int orgMonth = date.getMonth();
            date.setDate(date.getDate() + adjustment);
            if (date.getMonth() != orgMonth) {
                date.setDate(date.getDate() + (adjustment > 0 ? -7 : 7));
            }
        } else {
            // Explicit day-of-month must agree with the stated weekday.
            if (date.getDay() != this.dayOfWeek) {
                return false;
            }
        }
    }
    // Rebase from the parsed timezone offset to the local one (minutes).
    if (this.tzOffset > Integer.MIN_VALUE) {
        int offset = date.getTimezoneOffset();
        date.setTime(date.getTime() + (this.tzOffset - offset) * 60 * 1000);
    }
    return true;
}
282400.720122gwt
/**
 * Emits the generated {@code traverseProperties} override for an AutoBean
 * implementation: for every getter it writes code that builds a
 * ClientPropertyContext and invokes the appropriate visitor callback pair
 * (visitXProperty / endVisitXProperty). Setters referenced by a property
 * additionally get a JSNI method-reference accessor emitted after the
 * traversal method.
 *
 * @param sw receives the generated Java source
 * @param type the AutoBean model whose getters drive the traversal
 */
private void writeTraversal(SourceWriter sw, AutoBeanType type) {
    // Setters whose JSNI accessors must be emitted after the method body.
    List<AutoBeanMethod> referencedSetters = new ArrayList<AutoBeanMethod>();
    sw.println("@Override protected void traverseProperties(%s visitor, %s ctx) {", AutoBeanVisitor.class.getCanonicalName(), OneShotContext.class.getCanonicalName());
    sw.indent();
    sw.println("%s bean;", AbstractAutoBean.class.getCanonicalName());
    sw.println("Object value;");
    sw.println("%s propertyContext;", ClientPropertyContext.class.getCanonicalName());
    sw.println("%1$s as = as();", type.getPeerType().getQualifiedSourceName());
    for (AutoBeanMethod method : type.getMethods()) {
        // Only getters define traversable properties.
        if (!method.getAction().equals(JBeanMethod.GET)) {
            continue;
        }
        // For non-simple beans, find the setter paired with this getter so
        // the property context can write through it.
        AutoBeanMethod setter = null;
        if (!type.isSimpleBean()) {
            for (AutoBeanMethod maybeSetter : type.getMethods()) {
                boolean isASetter = maybeSetter.getAction().equals(JBeanMethod.SET) || maybeSetter.getAction().equals(JBeanMethod.SET_BUILDER);
                if (isASetter && maybeSetter.getPropertyName().equals(method.getPropertyName())) {
                    setter = maybeSetter;
                    break;
                }
            }
        }
        // Default: fetch the property as another AutoBean. Value-typed
        // properties instead capture the raw value below.
        String valueExpression = String.format("bean = (%1$s) %2$s.getAutoBean(as.%3$s());", AbstractAutoBean.class.getCanonicalName(), AutoBeanUtils.class.getCanonicalName(), method.getMethod().getName());
        String visitMethod;
        String visitVariable = "bean";
        if (method.isCollection()) {
            visitMethod = "Collection";
        } else if (method.isMap()) {
            visitMethod = "Map";
        } else if (method.isValueType()) {
            valueExpression = String.format("value = as.%s();", method.getMethod().getName());
            visitMethod = "Value";
            visitVariable = "value";
        } else {
            visitMethod = "Reference";
        }
        sw.println(valueExpression);
        // Flatten the return type and its type arguments; a single entry
        // emits the simple Class form, multiple entries emit the parallel
        // Class[] / type-argument-count int[] form.
        List<JType> typeList = new ArrayList<JType>();
        createTypeList(typeList, method.getMethod().getReturnType());
        assert typeList.size() > 0;
        sw.println("propertyContext = new %s(", ClientPropertyContext.class.getCanonicalName());
        sw.indent();
        sw.println("as,");
        {
            if (setter != null) {
                // Write through the real setter via its JSNI reference.
                sw.println("%sMethodReference(as),", setter.getMethod().getName());
                referencedSetters.add(setter);
            } else {
                // No setter: fall back to a reflective bean setter.
                sw.println("%s.beanSetter(%s.this, \"%s\"),", ClientPropertyContext.Setter.class.getCanonicalName(), type.getSimpleSourceName(), method.getPropertyName());
            }
        }
        if (typeList.size() == 1) {
            sw.println("%s.class", ModelUtils.ensureBaseType(typeList.get(0)).getQualifiedSourceName());
        } else {
            sw.print("new Class<?>[] {");
            boolean first = true;
            for (JType lit : typeList) {
                if (first) {
                    first = false;
                } else {
                    sw.print(", ");
                }
                sw.print("%s.class", ModelUtils.ensureBaseType(lit).getQualifiedSourceName());
            }
            sw.println("},");
            sw.print("new int[] {");
            first = true;
            for (JType lit : typeList) {
                if (first) {
                    first = false;
                } else {
                    sw.print(", ");
                }
                JParameterizedType hasParam = lit.isParameterized();
                if (hasParam == null) {
                    sw.print("0");
                } else {
                    sw.print(String.valueOf(hasParam.getTypeArgs().length));
                }
            }
            sw.println("}");
        }
        sw.outdent();
        sw.println(");");
        // Recurse into reference-typed properties when the visitor asks.
        sw.println("if (visitor.visit%sProperty(\"%s\", %s, propertyContext)) {", visitMethod, method.getPropertyName(), visitVariable);
        if (!method.isValueType()) {
            sw.indentln("if (bean != null) { bean.traverse(visitor, ctx); }");
        }
        sw.println("}");
        sw.println("visitor.endVisit%sProperty(\"%s\", %s, propertyContext);", visitMethod, method.getPropertyName(), visitVariable);
    }
    sw.outdent();
    sw.println("}");
    // Emit one JSNI setter accessor per referenced setter.
    for (AutoBeanMethod method : referencedSetters) {
        JMethod jmethod = method.getMethod();
        assert jmethod.getParameters().length == 1;
        sw.println("public static native %s %sMethodReference(Object instance) /*-{", ClientPropertyContext.Setter.class.getCanonicalName(), jmethod.getName());
        sw.indentln("return instance.@%s::%s(%s);", jmethod.getEnclosingType().getQualifiedSourceName(), jmethod.getName(), jmethod.getParameters()[0].getType().getJNISignature());
        sw.println("}-*/;");
    }
}
28327.8926196gwt
/**
 * Wraps a real JVM {@link Class} in a mock {@code JClassType} so TypeOracle
 * clients can be exercised against reflection data. Results are cached in
 * {@code adaptedClasses}; the cache is populated before the lazy answers are
 * wired up so recursive adaptation (superclass, interfaces, enclosing type)
 * terminates.
 *
 * @param clazz the class to adapt; must not be a primitive type
 * @return a cached or newly created mock JClassType mirroring {@code clazz}
 * @throws RuntimeException if {@code clazz} is a primitive type
 */
public JClassType adaptJavaClass(final Class<?> clazz) {
    if (clazz.isPrimitive()) {
        throw new RuntimeException("Only classes can be passed to adaptJavaClass");
    }
    JClassType type = adaptedClasses.get(clazz);
    if (type != null) {
        return type;
    }
    type = createMock(JClassType.class);
    final JClassType finalType = type;
    // Cache before wiring answers so self/mutually-referential types don't
    // recurse forever.
    adaptedClasses.put(clazz, type);
    addAnnotationBehaviour(clazz, type);
    // Members are adapted lazily, on first call of the corresponding getter.
    when(type.getMethods()).thenAnswer(new Answer<JMethod[]>() {

        @Override
        public JMethod[] answer(InvocationOnMock invocation) throws Throwable {
            Method[] realMethods = clazz.getDeclaredMethods();
            JMethod[] methods = new JMethod[realMethods.length];
            for (int i = 0; i < realMethods.length; i++) {
                methods[i] = adaptMethod(realMethods[i], finalType);
            }
            return methods;
        }
    });
    when(type.getConstructors()).thenAnswer(new Answer<JConstructor[]>() {

        @Override
        public JConstructor[] answer(InvocationOnMock invocation) throws Throwable {
            Constructor<?>[] realConstructors = clazz.getDeclaredConstructors();
            JConstructor[] constructors = new JConstructor[realConstructors.length];
            for (int i = 0; i < realConstructors.length; i++) {
                constructors[i] = adaptConstructor(realConstructors[i], finalType);
            }
            return constructors;
        }
    });
    when(type.getFields()).thenAnswer(new Answer<JField[]>() {

        @Override
        public JField[] answer(InvocationOnMock invocation) throws Throwable {
            Field[] realFields = clazz.getDeclaredFields();
            JField[] fields = new JField[realFields.length];
            for (int i = 0; i < realFields.length; i++) {
                fields[i] = adaptField(realFields[i], finalType);
            }
            return fields;
        }
    });
    when(type.getName()).thenReturn(clazz.getName());
    when(type.getQualifiedSourceName()).thenReturn(clazz.getCanonicalName());
    when(type.getSimpleSourceName()).thenReturn(clazz.getSimpleName());
    int modifiers = clazz.getModifiers();
    when(type.isAbstract()).thenReturn(Modifier.isAbstract(modifiers));
    when(type.isFinal()).thenReturn(Modifier.isFinal(modifiers));
    when(type.isPublic()).thenReturn(Modifier.isPublic(modifiers));
    when(type.isProtected()).thenReturn(Modifier.isProtected(modifiers));
    when(type.isPrivate()).thenReturn(Modifier.isPrivate(modifiers));
    // In the JClassType API these "isX" calls return the receiver cast to
    // the narrower type or null, not a boolean.
    when(type.isArray()).thenReturn(null);
    when(type.isEnum()).thenReturn(null);
    when(type.isPrimitive()).thenReturn(null);
    when(type.isClassOrInterface()).thenReturn(type);
    if (clazz.isInterface()) {
        when(type.isClass()).thenReturn(null);
        when(type.isInterface()).thenReturn(type);
    } else {
        when(type.isClass()).thenReturn(type);
        when(type.isInterface()).thenReturn(null);
    }
    when(type.getEnclosingType()).thenAnswer(new Answer<JClassType>() {

        @Override
        public JClassType answer(InvocationOnMock invocation) throws Throwable {
            Class<?> enclosingClass = clazz.getEnclosingClass();
            if (enclosingClass == null) {
                return null;
            }
            return adaptJavaClass(enclosingClass);
        }
    });
    when(type.getSuperclass()).thenAnswer(new Answer<JClassType>() {

        @Override
        public JClassType answer(InvocationOnMock invocation) throws Throwable {
            Class<?> superclass = clazz.getSuperclass();
            if (superclass == null) {
                return null;
            }
            return adaptJavaClass(superclass);
        }
    });
    when(type.getImplementedInterfaces()).thenAnswer(new Answer<JClassType[]>() {

        @Override
        public JClassType[] answer(InvocationOnMock invocation) throws Throwable {
            Class<?>[] interfaces = clazz.getInterfaces();
            if ((interfaces == null) || (interfaces.length == 0)) {
                return null;
            }
            JClassType[] adaptedInterfaces = new JClassType[interfaces.length];
            for (int i = 0; i < interfaces.length; i++) {
                adaptedInterfaces[i] = adaptJavaClass(interfaces[i]);
            }
            return adaptedInterfaces;
        }
    });
    when(type.getFlattenedSupertypeHierarchy()).thenAnswer(new Answer<Set<JClassType>>() {

        @Override
        public Set<JClassType> answer(InvocationOnMock invocation) throws Throwable {
            return flatten(clazz);
        }

        // Depth-first collection of this type, its interfaces, then its
        // superclass chain; LinkedHashSet keeps first-visit order.
        private Set<JClassType> flatten(Class<?> clazz) {
            Set<JClassType> flattened = new LinkedHashSet<JClassType>();
            flattened.add(adaptJavaClass(clazz));
            for (Class<?> intf : clazz.getInterfaces()) {
                flattened.addAll(flatten(intf));
            }
            Class<?> superClass = clazz.getSuperclass();
            if (superClass != null) {
                flattened.addAll(flatten(superClass));
            }
            return flattened;
        }
    });
    when(type.getInheritableMethods()).thenAnswer(new Answer<JMethod[]>() {

        @Override
        public JMethod[] answer(InvocationOnMock invocation) throws Throwable {
            // Collect inheritable methods keyed by an internal signature so
            // overrides collapse to the most-derived declaration.
            Map<String, Method> methodsBySignature = new TreeMap<String, Method>();
            getInheritableMethodsOnSuperinterfacesAndMaybeThisInterface(clazz, methodsBySignature);
            if (!clazz.isInterface()) {
                getInheritableMethodsOnSuperclassesAndThisClass(clazz, methodsBySignature);
            }
            int size = methodsBySignature.size();
            if (size == 0) {
                return new JMethod[0];
            } else {
                Iterator<Method> leafMethods = methodsBySignature.values().iterator();
                JMethod[] jMethods = new JMethod[size];
                for (int i = 0; i < size; i++) {
                    Method method = leafMethods.next();
                    jMethods[i] = adaptMethod(method, adaptJavaClass(method.getDeclaringClass()));
                }
                return jMethods;
            }
        }

        protected void getInheritableMethodsOnSuperinterfacesAndMaybeThisInterface(Class<?> clazz, Map<String, Method> methodsBySignature) {
            Class<?>[] superIntfs = clazz.getInterfaces();
            for (Class<?> superIntf : superIntfs) {
                getInheritableMethodsOnSuperinterfacesAndMaybeThisInterface(superIntf, methodsBySignature);
            }
            Method[] declaredMethods = clazz.getMethods();
            for (Method method : declaredMethods) {
                String sig = computeInternalSignature(method);
                Method existing = methodsBySignature.get(sig);
                if (existing != null) {
                    // Keep the more-derived declaration: skip this one when
                    // its declaring type is a supertype of the existing one.
                    Class<?> existingType = existing.getDeclaringClass();
                    Class<?> thisType = method.getDeclaringClass();
                    if (thisType.isAssignableFrom(existingType)) {
                        continue;
                    }
                }
                methodsBySignature.put(sig, method);
            }
        }

        protected void getInheritableMethodsOnSuperclassesAndThisClass(Class<?> clazz, Map<String, Method> methodsBySignature) {
            Class<?> superClass = clazz.getSuperclass();
            if (superClass != null) {
                getInheritableMethodsOnSuperclassesAndThisClass(superClass, methodsBySignature);
            }
            Method[] declaredMethods = clazz.getMethods();
            for (Method method : declaredMethods) {
                // Private and static methods are not inheritable.
                if (Modifier.isPrivate(method.getModifiers()) || Modifier.isStatic(method.getModifiers())) {
                    continue;
                }
                String sig = computeInternalSignature(method);
                methodsBySignature.put(sig, method);
            }
        }

        // Signature is name plus slash-separated parameter type names;
        // return type is deliberately excluded.
        private String computeInternalSignature(Method method) {
            // Note: a redundant sb.setLength(0) on the freshly created
            // builder was removed — it was a leftover from a reused-buffer
            // implementation and had no effect.
            StringBuilder sb = new StringBuilder();
            sb.append(method.getName());
            Class<?>[] params = method.getParameterTypes();
            for (Class<?> param : params) {
                sb.append("/");
                sb.append(param.getName());
            }
            return sb.toString();
        }
    });
    return type;
}
282077.3920117hadoop
/**
 * Rescans the cached-block map, reconciling each block's desired cache
 * replication with the per-datanode PENDING_CACHED / CACHED /
 * PENDING_UNCACHED lists, scheduling new caching/uncaching work as needed,
 * and dropping fully-reconciled blocks from {@code cachedBlocks}.
 * Periodically releases and reacquires the namesystem lock (when enabled)
 * so long scans don't starve other operations.
 */
private void rescanCachedBlockMap() {
    // Pass 1: per datanode, drop PENDING_CACHED entries that can no longer
    // fit in the node's remaining cache capacity.
    Set<DatanodeDescriptor> datanodes = blockManager.getDatanodeManager().getDatanodes();
    for (DatanodeDescriptor dn : datanodes) {
        long remaining = dn.getCacheRemaining();
        for (Iterator<CachedBlock> it = dn.getPendingCached().iterator(); it.hasNext(); ) {
            CachedBlock cblock = it.next();
            BlockInfo blockInfo = blockManager.getStoredBlock(new Block(cblock.getBlockId()));
            if (blockInfo == null) {
                LOG.debug("Block {}: cannot be found in block manager and hence" + " skipped from calculation for node {}.", cblock.getBlockId(), dn.getDatanodeUuid());
                continue;
            }
            if (blockInfo.getNumBytes() > remaining) {
                LOG.debug("Block {}: removing from PENDING_CACHED for node {} " + "because it cannot fit in remaining cache size {}.", cblock.getBlockId(), dn.getDatanodeUuid(), remaining);
                it.remove();
            } else {
                // Reserve capacity for this block on the node.
                remaining -= blockInfo.getNumBytes();
            }
        }
    }
    // Yield the lock between passes if lock-hold-time checking is on.
    if (cacheManager.isCheckLockTimeEnable()) {
        reacquireLock(lastScanTimeMs);
        lastScanTimeMs = Time.monotonicNow();
    }
    // Pass 2: reconcile every tracked cached block.
    for (Iterator<CachedBlock> cbIter = cachedBlocks.iterator(); cbIter.hasNext(); ) {
        scannedBlocks++;
        CachedBlock cblock = cbIter.next();
        List<DatanodeDescriptor> pendingCached = cblock.getDatanodes(Type.PENDING_CACHED);
        List<DatanodeDescriptor> cached = cblock.getDatanodes(Type.CACHED);
        List<DatanodeDescriptor> pendingUncached = cblock.getDatanodes(Type.PENDING_UNCACHED);
        // Drop stale PENDING_UNCACHED entries where the node already
        // uncached the block on its own.
        for (Iterator<DatanodeDescriptor> iter = pendingUncached.iterator(); iter.hasNext(); ) {
            DatanodeDescriptor datanode = iter.next();
            if (!cblock.isInList(datanode.getCached())) {
                LOG.trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because the DataNode uncached it.", cblock.getBlockId(), datanode.getDatanodeUuid());
                datanode.getPendingUncached().remove(cblock);
                iter.remove();
            }
        }
        // A non-null reason means the block is currently uncacheable, so
        // its needed replication is treated as zero.
        BlockInfo blockInfo = blockManager.getStoredBlock(new Block(cblock.getBlockId()));
        String reason = findReasonForNotCaching(cblock, blockInfo);
        int neededCached = 0;
        if (reason != null) {
            LOG.trace("Block {}: can't cache block because it is {}", cblock.getBlockId(), reason);
        } else {
            neededCached = cblock.getReplication();
        }
        int numCached = cached.size();
        // Already at/over target: cancel all still-pending cache work.
        if (numCached >= neededCached) {
            for (Iterator<DatanodeDescriptor> iter = pendingCached.iterator(); iter.hasNext(); ) {
                DatanodeDescriptor datanode = iter.next();
                datanode.getPendingCached().remove(cblock);
                iter.remove();
                LOG.trace("Block {}: removing from PENDING_CACHED for node {} " + "because we already have {} cached replicas and we only" + " need {}", cblock.getBlockId(), datanode.getDatanodeUuid(), numCached, neededCached);
            }
        }
        // Below target: cancel pending uncache work — we need the replicas.
        if (numCached < neededCached) {
            for (Iterator<DatanodeDescriptor> iter = pendingUncached.iterator(); iter.hasNext(); ) {
                DatanodeDescriptor datanode = iter.next();
                datanode.getPendingUncached().remove(cblock);
                iter.remove();
                LOG.trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because we only have {} cached replicas and we need " + "{}", cblock.getBlockId(), datanode.getDatanodeUuid(), numCached, neededCached);
            }
        }
        // Schedule new uncache work for the surplus, or new cache work for
        // the deficit (accounting for what is already pending).
        int neededUncached = numCached - (pendingUncached.size() + neededCached);
        if (neededUncached > 0) {
            addNewPendingUncached(neededUncached, cblock, cached, pendingUncached);
        } else {
            int additionalCachedNeeded = neededCached - (numCached + pendingCached.size());
            if (additionalCachedNeeded > 0) {
                addNewPendingCached(additionalCachedNeeded, cblock, cached, pendingCached);
            }
        }
        // Fully reconciled and no longer wanted: stop tracking the block.
        if ((neededCached == 0) && pendingUncached.isEmpty() && pendingCached.isEmpty()) {
            LOG.trace("Block {}: removing from cachedBlocks, since neededCached " + "== 0, and pendingUncached and pendingCached are empty.", cblock.getBlockId());
            cbIter.remove();
        }
        // Periodically yield the lock during a long scan.
        if (cacheManager.isCheckLockTimeEnable()) {
            reacquireLock(lastScanTimeMs);
            lastScanTimeMs = Time.monotonicNow();
        }
    }
}
283950.082130hadoop
/**
 * Verifies that snapshots taken while files are open for append capture
 * point-in-time lengths: each snapshot path must keep reporting the length
 * the file had when the snapshot was created, even as writers keep
 * appending, and files outside a snapshot root must be unaffected.
 *
 * Fix over the original: the four output streams are now closed in a
 * finally block, so a failed assertion (or I/O error) mid-test no longer
 * leaks open streams/leases into subsequent tests.
 */
public void testPointInTimeSnapshotCopiesForOpenFiles() throws Exception {
    // Directory tree: two independent hierarchies, with snapshot roots at
    // different depths (level2E for flume, level1D for hbase).
    final Path level0A = new Path("/level_0_A");
    final Path level0B = new Path("/level_0_B");
    final Path level1C = new Path(level0A, "level_1_C");
    final Path level1D = new Path(level0B, "level_1_D");
    final Path level2E = new Path(level1C, "level_2_E");
    final Path level3G = new Path(level2E, "level_3_G");
    Set<Path> dirPaths = new HashSet<>(Arrays.asList(level0A, level0B, level1C, level1D, level2E, level3G));
    for (Path dirPath : dirPaths) {
        fs.mkdirs(dirPath);
    }
    final Path flumeSnapRootDir = level2E;
    final Path hbaseSnapRootDir = level1D;
    final String flumeFileName = "flume.log";
    final String hbaseFileName = "hbase.log";
    final String appAFileName = "appA.log";
    final String appBFileName = "appB.log";
    final String flumeSnap1Name = "flume_snap_s1";
    final String flumeSnap2Name = "flume_snap_s2";
    final String flumeSnap3Name = "flume_snap_s3";
    final String hbaseSnap1Name = "hbase_snap_s1";
    final String hbaseSnap2Name = "hbase_snap_s2";
    final String hbaseSnap3Name = "hbase_snap_s3";
    final String flumeRelPathFromSnapDir = "level_3_G/" + flumeFileName;
    FSDataOutputStream flumeOutputStream = null;
    FSDataOutputStream hbaseOutputStream = null;
    FSDataOutputStream appAOutputStream = null;
    FSDataOutputStream appBOutputStream = null;
    try {
        // Create four files and keep them open for append: two inside
        // snapshot roots (flume, hbase) and two outside (appA, appB).
        final Path flumeFile = new Path(level3G, flumeFileName);
        createFile(flumeFile);
        flumeOutputStream = fs.append(flumeFile);
        final Path hbaseFile = new Path(level1D, hbaseFileName);
        createFile(hbaseFile);
        hbaseOutputStream = fs.append(hbaseFile);
        final Path appAFile = new Path(level1C, appAFileName);
        createFile(appAFile);
        appAOutputStream = fs.append(appAFile);
        final Path appBFile = new Path(level0B, appBFileName);
        createFile(appBFile);
        appBOutputStream = fs.append(appBFile);
        final long appAFileInitialLength = fs.getFileStatus(appAFile).getLen();
        final long appBFileInitialLength = fs.getFileStatus(appBFile).getLen();
        // Snapshot s1 while files are open: snapshot lengths must match
        // the live lengths at snapshot time.
        final Path flumeS1Dir = SnapshotTestHelper.createSnapshot(fs, flumeSnapRootDir, flumeSnap1Name);
        final Path flumeS1Path = new Path(flumeS1Dir, flumeRelPathFromSnapDir);
        final Path hbaseS1Dir = SnapshotTestHelper.createSnapshot(fs, hbaseSnapRootDir, hbaseSnap1Name);
        final Path hbaseS1Path = new Path(hbaseS1Dir, hbaseFileName);
        final long flumeFileLengthAfterS1 = fs.getFileStatus(flumeFile).getLen();
        final long hbaseFileLengthAfterS1 = fs.getFileStatus(hbaseFile).getLen();
        Assert.assertEquals(flumeFileLengthAfterS1, fs.getFileStatus(flumeS1Path).getLen());
        Assert.assertEquals(hbaseFileLengthAfterS1, fs.getFileStatus(hbaseS1Path).getLen());
        Assert.assertEquals(appAFileInitialLength, fs.getFileStatus(appAFile).getLen());
        Assert.assertEquals(appBFileInitialLength, fs.getFileStatus(appBFile).getLen());
        // Append 1.5 blocks to the open flume/hbase files, then take s2.
        long flumeFileWrittenDataLength = flumeFileLengthAfterS1;
        long hbaseFileWrittenDataLength = hbaseFileLengthAfterS1;
        long appAFileWrittenDataLength = appAFileInitialLength;
        int newWriteLength = (int) (BLOCKSIZE * 1.5);
        byte[] buf = new byte[newWriteLength];
        Random random = new Random();
        random.nextBytes(buf);
        flumeFileWrittenDataLength += writeToStream(flumeOutputStream, buf);
        hbaseFileWrittenDataLength += writeToStream(hbaseOutputStream, buf);
        final Path flumeS2Dir = SnapshotTestHelper.createSnapshot(fs, flumeSnapRootDir, flumeSnap2Name);
        final Path flumeS2Path = new Path(flumeS2Dir, flumeRelPathFromSnapDir);
        final Path hbaseS2Dir = SnapshotTestHelper.createSnapshot(fs, hbaseSnapRootDir, hbaseSnap2Name);
        final Path hbaseS2Path = new Path(hbaseS2Dir, hbaseFileName);
        final long flumeFileLengthAfterS2 = fs.getFileStatus(flumeFile).getLen();
        final long hbaseFileLengthAfterS2 = fs.getFileStatus(hbaseFile).getLen();
        Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS2);
        Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS2);
        Assert.assertEquals(flumeFileLengthAfterS2, fs.getFileStatus(flumeS2Path).getLen());
        Assert.assertEquals(hbaseFileLengthAfterS2, fs.getFileStatus(hbaseS2Path).getLen());
        Assert.assertEquals(appAFileInitialLength, fs.getFileStatus(appAFile).getLen());
        Assert.assertEquals(appBFileInitialLength, fs.getFileStatus(appBFile).getLen());
        // Writes to a file outside the snapshot root must not disturb the
        // snapshot copy of the flume file.
        newWriteLength = (int) (BLOCKSIZE * 2.5);
        buf = new byte[newWriteLength];
        random.nextBytes(buf);
        appAFileWrittenDataLength += writeToStream(appAOutputStream, buf);
        Assert.assertEquals(flumeFileLengthAfterS2, fs.getFileStatus(flumeS2Path).getLen());
        Assert.assertEquals(appAFileWrittenDataLength, fs.getFileStatus(appAFile).getLen());
        // Append 2.5 more blocks to flume, take s3, and re-verify every
        // snapshot still reports its original point-in-time length.
        newWriteLength = (int) (BLOCKSIZE * 2.5);
        buf = new byte[newWriteLength];
        random.nextBytes(buf);
        flumeFileWrittenDataLength += writeToStream(flumeOutputStream, buf);
        final Path flumeS3Dir = SnapshotTestHelper.createSnapshot(fs, flumeSnapRootDir, flumeSnap3Name);
        final Path flumeS3Path = new Path(flumeS3Dir, flumeRelPathFromSnapDir);
        final Path hbaseS3Dir = SnapshotTestHelper.createSnapshot(fs, hbaseSnapRootDir, hbaseSnap3Name);
        final Path hbaseS3Path = new Path(hbaseS3Dir, hbaseFileName);
        final long flumeFileLengthAfterS3 = fs.getFileStatus(flumeFile).getLen();
        final long hbaseFileLengthAfterS3 = fs.getFileStatus(hbaseFile).getLen();
        Assert.assertEquals(flumeFileWrittenDataLength, flumeFileLengthAfterS3);
        Assert.assertEquals(hbaseFileWrittenDataLength, hbaseFileLengthAfterS3);
        Assert.assertEquals(flumeFileLengthAfterS3, fs.getFileStatus(flumeS3Path).getLen());
        Assert.assertEquals(hbaseFileLengthAfterS3, fs.getFileStatus(hbaseS3Path).getLen());
        Assert.assertEquals(appAFileWrittenDataLength, fs.getFileStatus(appAFile).getLen());
        Assert.assertEquals(appBFileInitialLength, fs.getFileStatus(appBFile).getLen());
        Assert.assertEquals(flumeFileLengthAfterS1, fs.getFileStatus(flumeS1Path).getLen());
        Assert.assertEquals(flumeFileLengthAfterS2, fs.getFileStatus(flumeS2Path).getLen());
        Assert.assertEquals(flumeFileLengthAfterS3, fs.getFileStatus(flumeS3Path).getLen());
        Assert.assertEquals(hbaseFileLengthAfterS1, fs.getFileStatus(hbaseS1Path).getLen());
        Assert.assertEquals(hbaseFileLengthAfterS2, fs.getFileStatus(hbaseS2Path).getLen());
        Assert.assertEquals(hbaseFileLengthAfterS3, fs.getFileStatus(hbaseS3Path).getLen());
        // Explicit closes remain in the happy path so a close failure
        // still fails the test.
        flumeOutputStream.close();
        hbaseOutputStream.close();
        appAOutputStream.close();
        appBOutputStream.close();
    } finally {
        // Best-effort cleanup so a failed assertion doesn't leak streams.
        closeIgnoringErrors(flumeOutputStream);
        closeIgnoringErrors(hbaseOutputStream);
        closeIgnoringErrors(appAOutputStream);
        closeIgnoringErrors(appBOutputStream);
    }
}

/**
 * Closes a stream if non-null, swallowing any failure. Used only for
 * finally-block cleanup where a primary exception may already be
 * propagating and must not be masked.
 */
private static void closeIgnoringErrors(FSDataOutputStream stream) {
    if (stream == null) {
        return;
    }
    try {
        stream.close();
    } catch (Exception ignored) {
        // Best-effort cleanup; the test outcome is decided elsewhere.
    }
}
283270.5415110hadoop
 /**
  * Exercises the checkpoint workflow of a BackupNode / CheckpointNode
  * (selected by {@code op}) in three phases:
  * <ol>
  *   <li>Fresh cluster with no datanodes: create a directory, start the
  *       backup node, wait for its first checkpoint, then verify the backup
  *       node's storage directory matches the namenode's.</li>
  *   <li>Restart without formatting: mutate the namespace, force two
  *       explicit checkpoints, and check that writes to the backup node are
  *       rejected while reads are allowed only for the BACKUP role.</li>
  *   <li>Restart once more to confirm the checkpointed namespace
  *       (file1 deleted, file2 created) was persisted.</li>
  * </ol>
  */
 void testCheckpoint(StartupOption op) throws Exception {
    Path file1 = new Path("/checkpoint.dat");
    Path file2 = new Path("/checkpoint2.dat");
    Path file3 = new Path("/backup.dat");
    Configuration conf = new HdfsConfiguration();
    HAUtil.setAllowStandbyReads(conf, true);
    short replication = (short) conf.getInt("dfs.replication", 3);
    int numDatanodes = Math.max(3, replication);
    conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "localhost:0");
    conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
    // Disable the block scanner and checkpoint after every transaction so
    // the backup node checkpoints promptly.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    BackupNode backup = null;
    try {
        // Phase 1: initial checkpoint on a freshly formatted namespace.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        fileSys = cluster.getFileSystem();
        assertTrue(!fileSys.exists(file1));
        assertTrue(!fileSys.exists(file2));
        assertTrue(fileSys.mkdirs(file1));
        long txid = cluster.getNameNodeRpc().getTransactionID();
        backup = startBackupNode(conf, op, 1);
        waitCheckpointDone(cluster, txid);
    } catch (IOException e) {
        LOG.error("Error in TestBackupNode:", e);
        // Wrap and rethrow so the original stack trace is preserved,
        // consistent with the second phase's failure handling below.
        throw new AssertionError(e);
    } finally {
        if (backup != null)
            backup.stop();
        if (fileSys != null)
            fileSys.close();
        if (cluster != null)
            cluster.shutdown();
    }
    File nnCurDir = new File(MiniDFSCluster.getNameNodeDirectory(BASE_DIR, 0, 0)[0], "current/");
    File bnCurDir = new File(getBackupNodeDir(op, 1), "/current/");
    FSImageTestUtil.assertParallelFilesAreIdentical(ImmutableList.of(bnCurDir, nnCurDir), ImmutableSet.<String>of("VERSION"));
    try {
        // Phase 2: restart on the checkpointed image and exercise explicit
        // checkpoints plus the backup node's read/write access rules.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        fileSys = cluster.getFileSystem();
        assertTrue(fileSys.exists(file1));
        fileSys.delete(file1, true);
        fileSys.mkdirs(file2);
        long txid = cluster.getNameNodeRpc().getTransactionID();
        backup = startBackupNode(conf, op, 1);
        waitCheckpointDone(cluster, txid);
        for (int i = 0; i < 10; i++) {
            fileSys.mkdirs(new Path("file_" + i));
        }
        txid = cluster.getNameNodeRpc().getTransactionID();
        backup.doCheckpoint();
        waitCheckpointDone(cluster, txid);
        txid = cluster.getNameNodeRpc().getTransactionID();
        backup.doCheckpoint();
        waitCheckpointDone(cluster, txid);
        // Writes through the backup node's RPC address must be rejected.
        InetSocketAddress add = backup.getNameNodeAddress();
        FileSystem bnFS = FileSystem.get(new Path("hdfs://" + NetUtils.getHostPortString(add)).toUri(), conf);
        boolean canWrite = true;
        try {
            DFSTestUtil.createFile(bnFS, file3, fileSize, fileSize, blockSize, replication, seed);
        } catch (IOException eio) {
            LOG.info("Write to " + backup.getRole() + " failed as expected: ", eio);
            canWrite = false;
        }
        assertFalse("Write to BackupNode must be prohibited.", canWrite);
        // Reads are allowed only when the node runs in the BACKUP role.
        boolean canRead = true;
        try {
            bnFS.exists(file2);
        } catch (IOException eio) {
            LOG.info("Read from " + backup.getRole() + " failed: ", eio);
            canRead = false;
        }
        assertEquals("Reads to BackupNode are allowed, but not CheckpointNode.", canRead, backup.isRole(NamenodeRole.BACKUP));
        DFSTestUtil.createFile(fileSys, file3, fileSize, fileSize, blockSize, replication, seed);
        TestCheckpoint.checkFile(fileSys, file3, replication);
        assertTrue("file3 does not exist on BackupNode", op != StartupOption.BACKUP || backup.getNamesystem().getFileInfo(file3.toUri().getPath(), false, false, false) != null);
    } catch (IOException e) {
        LOG.error("Error in TestBackupNode:", e);
        throw new AssertionError(e);
    } finally {
        if (backup != null)
            backup.stop();
        if (fileSys != null)
            fileSys.close();
        if (cluster != null)
            cluster.shutdown();
    }
    FSImageTestUtil.assertParallelFilesAreIdentical(ImmutableList.of(bnCurDir, nnCurDir), ImmutableSet.<String>of("VERSION"));
    try {
        // Phase 3: restart once more and confirm the namespace changes from
        // phase 2 were checkpointed and persisted.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
        fileSys = cluster.getFileSystem();
        assertTrue(!fileSys.exists(file1));
        assertTrue(fileSys.exists(file2));
    } catch (IOException e) {
        LOG.error("Error in TestBackupNode: ", e);
        throw new AssertionError(e);
    } finally {
        // Null-guard cleanup so a failure above cannot be masked by an NPE,
        // matching the earlier finally blocks in this method.
        if (fileSys != null) {
            fileSys.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
283001.125132hadoop
/**
 * Verifies namenode behavior across several combinations of separate and
 * shared name/edits directory configurations, including the failure case
 * where the configured name dirs do not contain the latest edits:
 * <ol>
 *   <li>Single shared name+edits directory.</li>
 *   <li>Shared dir plus a name-only and an edits-only directory.</li>
 *   <li>Name-only and edits-only directories alone.</li>
 *   <li>Stale name-only dir listed first: cluster start must fail.</li>
 *   <li>Recovery using the shared dir together with the edits-only dir.</li>
 * </ol>
 */
public void testNameEditsConfigsFailure() throws IOException {
    Path file1 = new Path("TestNameEditsConfigs1");
    Path file2 = new Path("TestNameEditsConfigs2");
    Path file3 = new Path("TestNameEditsConfigs3");
    MiniDFSCluster cluster = null;
    Configuration conf = null;
    FileSystem fileSys = null;
    File nameOnlyDir = new File(base_dir, "name");
    File editsOnlyDir = new File(base_dir, "edits");
    File nameAndEditsDir = new File(base_dir, "name_and_edits");
    // Phase 1: name and edits in a single shared directory.
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
        cluster.waitActive();
        assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
        fileSys = cluster.getFileSystem();
        assertTrue(!fileSys.exists(file1));
        DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file1, replication);
    } finally {
        if (fileSys != null) {
            fileSys.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    // Phase 2: shared dir plus separate name-only and edits-only dirs.
    conf = new HdfsConfiguration();
    assertTrue(nameOnlyDir.mkdir());
    assertTrue(editsOnlyDir.mkdir());
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath() + "," + nameOnlyDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath() + "," + editsOnlyDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
        cluster.waitActive();
        assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
        assertTrue(new File(nameOnlyDir, "current/VERSION").exists());
        assertTrue(new File(editsOnlyDir, "current/VERSION").exists());
        fileSys = cluster.getFileSystem();
        assertTrue(fileSys.exists(file1));
        checkFile(fileSys, file1, replication);
        cleanupFile(fileSys, file1);
        DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file2, replication);
    } finally {
        // Null-guard so a build() failure is not masked by an NPE during
        // cleanup (consistent with the first phase above).
        if (fileSys != null) {
            fileSys.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    // Phase 3: separate name-only and edits-only directories alone.
    try {
        conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath());
        conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath());
        replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        assertFalse(fileSys.exists(file1));
        assertTrue(fileSys.exists(file2));
        checkFile(fileSys, file2, replication);
        cleanupFile(fileSys, file2);
        DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file3, replication);
    } finally {
        if (fileSys != null) {
            fileSys.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    // Phase 4: the name-only dir is stale (missing latest edits), so the
    // cluster must refuse to start.
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
        fail("Successfully started cluster but should not have been able to.");
    } catch (IOException e) {
        LOG.info("EXPECTED: cluster start failed due to missing " + "latest edits dir", e);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        cluster = null;
    }
    // Phase 5: recover using the shared dir together with the edits-only
    // dir; only file3 from phase 3 should still exist.
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
        fileSys = cluster.getFileSystem();
        assertFalse(fileSys.exists(file1));
        assertFalse(fileSys.exists(file2));
        assertTrue(fileSys.exists(file3));
        checkFile(fileSys, file3, replication);
        cleanupFile(fileSys, file3);
        DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file3, replication);
    } finally {
        if (fileSys != null) {
            fileSys.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
283386.4120102hadoop
 /**
  * Validates the stored contents of a striped (erasure-coded) file against
  * the expected byte pattern produced by {@code getByte(posInFile)}, and
  * then checks the parity blocks via {@code verifyParityBlocks}.
  *
  * @param dfs          file system to read from
  * @param srcPath      striped file to verify
  * @param length       expected file length in bytes
  * @param killedList   datanodes that were killed; internal blocks whose
  *                     first replica location is in this list are not read
  * @param oldGSList    per-block-group generation stamps recorded before a
  *                     failure, or null; new stamps must be &gt;= old ones
  * @param blkGroupSize number of file bytes covered by one block group
  */
 static void checkData(DistributedFileSystem dfs, Path srcPath, int length, List<DatanodeInfo> killedList, List<Long> oldGSList, int blkGroupSize) throws IOException {
    StripedFileTestUtil.verifyLength(dfs, srcPath, length);
    List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(srcPath.toString(), 0L, Long.MAX_VALUE);
    if (length > 0) {
        // ceil(length / blkGroupSize) block groups are expected.
        int expectedNumGroup = (length - 1) / blkGroupSize + 1;
        assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
    }
    final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
    final int cellSize = ecPolicy.getCellSize();
    final int dataBlkNum = ecPolicy.getNumDataUnits();
    final int parityBlkNum = ecPolicy.getNumParityUnits();
    int index = 0;
    // Parse each located block group into its internal (data + parity)
    // blocks, checking that generation stamps did not move backwards.
    for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
        Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
        final long gs = firstBlock.getBlock().getGenerationStamp();
        final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
        final String s = "gs=" + gs + ", oldGS=" + oldGS;
        LOG.info(s);
        Assert.assertTrue(s, gs >= oldGS);
        LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup((LocatedStripedBlock) firstBlock, cellSize, dataBlkNum, parityBlkNum);
        blockGroupList.add(Arrays.asList(blocks));
    }
    for (int group = 0; group < blockGroupList.size(); group++) {
        final boolean isLastGroup = group == blockGroupList.size() - 1;
        // The last group may be partial; all others cover blkGroupSize bytes.
        final int groupSize = !isLastGroup ? blkGroupSize : length - (blockGroupList.size() - 1) * blkGroupSize;
        final int numCellInGroup = (groupSize - 1) / cellSize + 1;
        final int lastCellIndex = (numCellInGroup - 1) % dataBlkNum;
        final int lastCellSize = groupSize - (numCellInGroup - 1) * cellSize;
        List<LocatedBlock> blockList = blockGroupList.get(group);
        byte[][] dataBlockBytes = new byte[dataBlkNum][];
        byte[][] parityBlockBytes = new byte[parityBlkNum][];
        // Indices of internal blocks that were actually read back (i.e. not
        // hosted on a killed datanode).
        Set<Integer> checkSet = new HashSet<>();
        for (int i = 0; i < blockList.size(); i++) {
            // j is the data-column this block corresponds to for sizing;
            // parity blocks (i >= dataBlkNum) are sized like column 0.
            final int j = i >= dataBlkNum ? 0 : i;
            final int numCellInBlock = (numCellInGroup - 1) / dataBlkNum + (j <= lastCellIndex ? 1 : 0);
            final int blockSize = numCellInBlock * cellSize + (isLastGroup && j == lastCellIndex ? lastCellSize - cellSize : 0);
            final byte[] blockBytes = new byte[blockSize];
            if (i < dataBlkNum) {
                dataBlockBytes[i] = blockBytes;
            } else {
                parityBlockBytes[i - dataBlkNum] = blockBytes;
            }
            final LocatedBlock lb = blockList.get(i);
            LOG.info("i,j=" + i + ", " + j + ", numCellInBlock=" + numCellInBlock + ", blockSize=" + blockSize + ", lb=" + lb);
            if (lb == null) {
                continue;
            }
            final ExtendedBlock block = lb.getBlock();
            assertEquals(blockSize, block.getNumBytes());
            if (block.getNumBytes() == 0) {
                continue;
            }
            // Only read blocks whose first replica location survived; blocks
            // on killed nodes stay out of checkSet and are handled below.
            DatanodeInfo dn = blockList.get(i).getLocations()[0];
            if (!killedList.contains(dn)) {
                final BlockReader blockReader = BlockReaderTestUtil.getBlockReader(dfs, lb, 0, block.getNumBytes());
                blockReader.readAll(blockBytes, 0, (int) block.getNumBytes());
                blockReader.close();
                checkSet.add(i);
            }
        }
        LOG.info("Internal blocks to check: " + checkSet);
        final int groupPosInFile = group * blkGroupSize;
        for (int i = 0; i < dataBlockBytes.length; i++) {
            boolean killed = false;
            if (!checkSet.contains(i)) {
                killed = true;
            }
            final byte[] actual = dataBlockBytes[i];
            for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
                final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
                Assert.assertTrue(posInFile < length);
                final byte expected = getByte(posInFile);
                if (killed) {
                    // Block was not read back; fill in the expected bytes so
                    // the parity verification below still has valid data.
                    actual[posInBlk] = expected;
                } else {
                    if (expected != actual[posInBlk]) {
                        String s = "expected=" + expected + " but actual=" + actual[posInBlk] + ", posInFile=" + posInFile + ", posInBlk=" + posInBlk + ". group=" + group + ", i=" + i;
                        Assert.fail(s);
                    }
                }
            }
        }
        verifyParityBlocks(dfs.getConf(), lbs.getLocatedBlocks().get(group).getBlockSize(), cellSize, dataBlockBytes, parityBlockBytes, checkSet, ecPolicy.getCodecName());
    }
}
284305.54119hadoop
/**
 * Tests FileStatus and listing behavior on directories: status of an empty
 * directory, listings via FileSystem, FileContext, and listStatusIterator
 * as files and subdirectories are added (entries are expected in sorted
 * order, directories before files here by name), and iterator behavior when
 * the underlying directory is deleted mid-iteration.
 */
public void testGetFileStatusOnDir() throws Exception {
    Path dir = new Path("/test/mkdirs");
    assertTrue("mkdir failed", fs.mkdirs(dir));
    assertTrue("mkdir failed", fs.exists(dir));
    // An empty directory: zero length, not erasure-coded, no children.
    FileStatus status = fs.getFileStatus(dir);
    assertTrue(dir + " should be a directory", status.isDirectory());
    assertTrue(dir + " should be zero size ", status.getLen() == 0);
    ContractTestUtils.assertNotErasureCoded(fs, dir);
    assertEquals(dir.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
    FileStatus[] stats = fs.listStatus(dir);
    assertEquals(dir + " should be empty", 0, stats.length);
    assertEquals(dir + " should be zero size ", 0, fs.getContentSummary(dir).getLength());
    RemoteIterator<FileStatus> itor = fc.listStatus(dir);
    assertFalse(dir + " should be empty", itor.hasNext());
    itor = fs.listStatusIterator(dir);
    assertFalse(dir + " should be empty", itor.hasNext());
    // Add one file and verify its status fields.
    Path file2 = new Path(dir, "filestatus2.dat");
    DFSTestUtil.createFile(fs, file2, blockSize / 4, blockSize / 4, blockSize, (short) 1, seed);
    checkFile(fs, file2, 1);
    status = fs.getFileStatus(file2);
    assertEquals(blockSize, status.getBlockSize());
    assertEquals(1, status.getReplication());
    file2 = fs.makeQualified(file2);
    assertEquals(file2.toString(), status.getPath().toString());
    // Add a second file; content summary and listings now see both.
    Path file3 = new Path(dir, "filestatus3.dat");
    DFSTestUtil.createFile(fs, file3, blockSize / 4, blockSize / 4, blockSize, (short) 1, seed);
    checkFile(fs, file3, 1);
    file3 = fs.makeQualified(file3);
    final int expected = blockSize / 2;
    assertEquals(dir + " size should be " + expected, expected, fs.getContentSummary(dir).getLength());
    stats = fs.listStatus(dir);
    assertEquals(dir + " should have two entries", 2, stats.length);
    assertEquals(file2.toString(), stats[0].getPath().toString());
    assertEquals(file3.toString(), stats[1].getPath().toString());
    itor = fc.listStatus(dir);
    assertEquals(file2.toString(), itor.next().getPath().toString());
    assertEquals(file3.toString(), itor.next().getPath().toString());
    assertFalse("Unexpected addtional file", itor.hasNext());
    itor = fs.listStatusIterator(dir);
    assertEquals(file2.toString(), itor.next().getPath().toString());
    assertEquals(file3.toString(), itor.next().getPath().toString());
    assertFalse("Unexpected addtional file", itor.hasNext());
    // Add a subdirectory; it sorts before the two files in listings.
    Path dir3 = fs.makeQualified(new Path(dir, "dir3"));
    fs.mkdirs(dir3);
    dir3 = fs.makeQualified(dir3);
    stats = fs.listStatus(dir);
    assertEquals(dir + " should have three entries", 3, stats.length);
    assertEquals(dir3.toString(), stats[0].getPath().toString());
    assertEquals(file2.toString(), stats[1].getPath().toString());
    assertEquals(file3.toString(), stats[2].getPath().toString());
    itor = fc.listStatus(dir);
    assertEquals(dir3.toString(), itor.next().getPath().toString());
    assertEquals(file2.toString(), itor.next().getPath().toString());
    assertEquals(file3.toString(), itor.next().getPath().toString());
    assertFalse("Unexpected addtional file", itor.hasNext());
    itor = fs.listStatusIterator(dir);
    assertEquals(dir3.toString(), itor.next().getPath().toString());
    assertEquals(file2.toString(), itor.next().getPath().toString());
    assertEquals(file3.toString(), itor.next().getPath().toString());
    assertFalse("Unexpected addtional file", itor.hasNext());
    // Two more subdirectories; listings return all five entries in order.
    Path dir4 = fs.makeQualified(new Path(dir, "dir4"));
    fs.mkdirs(dir4);
    dir4 = fs.makeQualified(dir4);
    Path dir5 = fs.makeQualified(new Path(dir, "dir5"));
    fs.mkdirs(dir5);
    dir5 = fs.makeQualified(dir5);
    stats = fs.listStatus(dir);
    assertEquals(dir + " should have five entries", 5, stats.length);
    assertEquals(dir3.toString(), stats[0].getPath().toString());
    assertEquals(dir4.toString(), stats[1].getPath().toString());
    assertEquals(dir5.toString(), stats[2].getPath().toString());
    assertEquals(file2.toString(), stats[3].getPath().toString());
    assertEquals(file3.toString(), stats[4].getPath().toString());
    itor = fc.listStatus(dir);
    assertEquals(dir3.toString(), itor.next().getPath().toString());
    assertEquals(dir4.toString(), itor.next().getPath().toString());
    assertEquals(dir5.toString(), itor.next().getPath().toString());
    assertEquals(file2.toString(), itor.next().getPath().toString());
    assertEquals(file3.toString(), itor.next().getPath().toString());
    assertFalse(itor.hasNext());
    itor = fs.listStatusIterator(dir);
    assertEquals(dir3.toString(), itor.next().getPath().toString());
    assertEquals(dir4.toString(), itor.next().getPath().toString());
    assertEquals(dir5.toString(), itor.next().getPath().toString());
    assertEquals(file2.toString(), itor.next().getPath().toString());
    assertEquals(file3.toString(), itor.next().getPath().toString());
    assertFalse(itor.hasNext());
    // Delete the tree while a partially-consumed iterator is open; the next
    // hasNext() must surface FileNotFoundException.
    itor = fs.listStatusIterator(dir);
    assertEquals(dir3.toString(), itor.next().getPath().toString());
    assertEquals(dir4.toString(), itor.next().getPath().toString());
    fs.delete(dir.getParent(), true);
    try {
        itor.hasNext();
        fail("FileNotFoundException expected");
    } catch (FileNotFoundException fnfe) {
    }
    // Recreate entries, delete again before consuming: next() must have
    // returned exactly the entries batched before the delete (2 here).
    fs.mkdirs(file2);
    fs.mkdirs(dir3);
    fs.mkdirs(dir4);
    fs.mkdirs(dir5);
    itor = fs.listStatusIterator(dir);
    int count = 0;
    try {
        fs.delete(dir.getParent(), true);
        while (itor.next() != null) {
            count++;
        }
        fail("FileNotFoundException expected");
    } catch (FileNotFoundException fnfe) {
    }
    assertEquals(2, count);
}
282559.5721118hadoop
/**
 * Builds the ResourceRequests for the MapReduce ApplicationMaster
 * container: an ANY-request carrying the AM's capability (memory, vcores,
 * and any custom resource types configured under the MR AM resource
 * prefix), plus rack- and node-level requests when strict AM locality is
 * configured via {@code MRJobConfig.AM_STRICT_LOCALITY}.
 *
 * @return the list of AM resource requests to submit with the application
 * @throws IOException if a strict-locality entry does not match the
 *         expected rack/node pattern
 */
private List<ResourceRequest> generateResourceRequests() throws IOException {
    Resource capability = recordFactory.newRecordInstance(Resource.class);
    boolean memorySet = false;
    boolean cpuVcoresSet = false;
    List<ResourceInformation> resourceRequests = ResourceUtils.getRequestedResourcesFromConfig(conf, MR_AM_RESOURCE_PREFIX);
    for (ResourceInformation resourceReq : resourceRequests) {
        String resourceName = resourceReq.getName();
        if (MRJobConfig.RESOURCE_TYPE_NAME_MEMORY.equals(resourceName) || MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY.equals(resourceName)) {
            // Memory can be spelled two ways; reject if both are given.
            if (memorySet) {
                throw new IllegalArgumentException("Only one of the following keys " + "can be specified for a single job: " + MRJobConfig.RESOURCE_TYPE_ALTERNATIVE_NAME_MEMORY + ", " + MRJobConfig.RESOURCE_TYPE_NAME_MEMORY);
            }
            // Missing units fall back to the default unit for memory;
            // the value is normalized to mebibytes.
            String units = isEmpty(resourceReq.getUnits()) ? ResourceUtils.getDefaultUnit(ResourceInformation.MEMORY_URI) : resourceReq.getUnits();
            capability.setMemorySize(UnitsConversionUtil.convert(units, "Mi", resourceReq.getValue()));
            memorySet = true;
            // Warn when the new-style resource config overrides the legacy
            // AM memory setting.
            if (conf.get(MRJobConfig.MR_AM_VMEM_MB) != null) {
                LOG.warn("Configuration " + MR_AM_RESOURCE_PREFIX + resourceName + "=" + resourceReq.getValue() + resourceReq.getUnits() + " is overriding the " + MRJobConfig.MR_AM_VMEM_MB + "=" + conf.get(MRJobConfig.MR_AM_VMEM_MB) + " configuration");
            }
        } else if (MRJobConfig.RESOURCE_TYPE_NAME_VCORE.equals(resourceName)) {
            capability.setVirtualCores((int) UnitsConversionUtil.convert(resourceReq.getUnits(), "", resourceReq.getValue()));
            cpuVcoresSet = true;
            // Same override warning for the legacy AM vcores setting.
            if (conf.get(MRJobConfig.MR_AM_CPU_VCORES) != null) {
                LOG.warn("Configuration " + MR_AM_RESOURCE_PREFIX + resourceName + "=" + resourceReq.getValue() + resourceReq.getUnits() + " is overriding the " + MRJobConfig.MR_AM_CPU_VCORES + "=" + conf.get(MRJobConfig.MR_AM_CPU_VCORES) + " configuration");
            }
        } else if (!MRJobConfig.MR_AM_VMEM_MB.equals(MR_AM_RESOURCE_PREFIX + resourceName) && !MRJobConfig.MR_AM_CPU_VCORES.equals(MR_AM_RESOURCE_PREFIX + resourceName)) {
            // Custom resource type (anything that is not the legacy
            // memory/vcores keys): copy units and value into the capability.
            ResourceInformation resourceInformation = capability.getResourceInformation(resourceName);
            resourceInformation.setUnits(resourceReq.getUnits());
            resourceInformation.setValue(resourceReq.getValue());
            capability.setResourceInformation(resourceName, resourceInformation);
        }
    }
    // Fall back to the legacy keys (or their defaults) for anything the
    // new-style resource config did not set.
    if (!memorySet) {
        capability.setMemorySize(conf.getInt(MRJobConfig.MR_AM_VMEM_MB, MRJobConfig.DEFAULT_MR_AM_VMEM_MB));
    }
    if (!cpuVcoresSet) {
        capability.setVirtualCores(conf.getInt(MRJobConfig.MR_AM_CPU_VCORES, MRJobConfig.DEFAULT_MR_AM_CPU_VCORES));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("AppMaster capability = " + capability);
    }
    List<ResourceRequest> amResourceRequests = new ArrayList<>();
    ResourceRequest amAnyResourceRequest = createAMResourceRequest(ResourceRequest.ANY, capability);
    Map<String, ResourceRequest> rackRequests = new HashMap<>();
    amResourceRequests.add(amAnyResourceRequest);
    // Strict locality: disable relaxation on the ANY request and emit
    // rack-level (and optionally node-level) requests per configured entry.
    Collection<String> amStrictResources = conf.getStringCollection(MRJobConfig.AM_STRICT_LOCALITY);
    for (String amStrictResource : amStrictResources) {
        amAnyResourceRequest.setRelaxLocality(false);
        Matcher matcher = RACK_NODE_PATTERN.matcher(amStrictResource);
        if (matcher.matches()) {
            String nodeName;
            String rackName = matcher.group(RACK_GROUP);
            if (rackName == null) {
                // Bare node name: place it under the default rack.
                rackName = "/default-rack";
                nodeName = matcher.group(NODE_IF_NO_RACK_GROUP);
            } else {
                nodeName = matcher.group(NODE_IF_RACK_GROUP);
            }
            // One rack request per distinct rack, cached in rackRequests.
            ResourceRequest amRackResourceRequest = rackRequests.get(rackName);
            if (amRackResourceRequest == null) {
                amRackResourceRequest = createAMResourceRequest(rackName, capability);
                amResourceRequests.add(amRackResourceRequest);
                rackRequests.put(rackName, amRackResourceRequest);
            }
            if (nodeName != null) {
                amRackResourceRequest.setRelaxLocality(false);
                ResourceRequest amNodeResourceRequest = createAMResourceRequest(nodeName, capability);
                amResourceRequests.add(amNodeResourceRequest);
            }
        } else {
            String errMsg = "Invalid resource name: " + amStrictResource + " specified.";
            LOG.warn(errMsg);
            throw new IOException(errMsg);
        }
    }
    if (LOG.isDebugEnabled()) {
        for (ResourceRequest amResourceRequest : amResourceRequests) {
            LOG.debug("ResourceRequest: resource = " + amResourceRequest.getResourceName() + ", locality = " + amResourceRequest.getRelaxLocality());
        }
    }
    return amResourceRequests;
}
281868.7426115hadoop
/**
 * Monitors the infrastructure YARN application until it reaches a terminal
 * state, optionally running a background thread that waits for NameNode
 * readiness and then launches/monitors the workload job.
 *
 * @return true if the run completed successfully (infra app killed after a
 *         successful workload job, or killed with no workload configured)
 * @throws YarnException on YARN client errors
 * @throws IOException on communication errors with the RM
 */
private boolean monitorInfraApplication() throws YarnException, IOException {
    boolean loggedApplicationInfo = false;
    boolean success = false;
    // Background thread: wait for the NameNode to come up and become ready,
    // then (optionally) launch and monitor the workload job against it.
    Thread namenodeMonitoringThread = new Thread(() -> {
        Supplier<Boolean> exitCritera = () -> Apps.isApplicationFinalState(infraAppState);
        Optional<Properties> namenodeProperties = Optional.empty();
        while (!exitCritera.get()) {
            try {
                if (!namenodeProperties.isPresent()) {
                    namenodeProperties = DynoInfraUtils.waitForAndGetNameNodeProperties(exitCritera, getConf(), getNameNodeInfoPath(), LOG);
                    if (namenodeProperties.isPresent()) {
                        Properties props = namenodeProperties.get();
                        LOG.info("NameNode can be reached via HDFS at: {}", DynoInfraUtils.getNameNodeHdfsUri(props));
                        LOG.info("NameNode web UI available at: {}", DynoInfraUtils.getNameNodeWebUri(props));
                        LOG.info("NameNode can be tracked at: {}", DynoInfraUtils.getNameNodeTrackingUri(props));
                    } else {
                        // Exit criteria met before properties became available.
                        break;
                    }
                }
                DynoInfraUtils.waitForNameNodeStartup(namenodeProperties.get(), exitCritera, LOG);
                DynoInfraUtils.waitForNameNodeReadiness(namenodeProperties.get(), numTotalDataNodes, false, exitCritera, getConf(), LOG);
                break;
            } catch (IOException ioe) {
                LOG.error("Unexpected exception while waiting for NameNode readiness", ioe);
            } catch (InterruptedException ie) {
                return;
            }
        }
        // Guard with isPresent(): the loop above can exit with an empty
        // Optional, in which case get() would throw NoSuchElementException.
        if (namenodeProperties.isPresent() && !Apps.isApplicationFinalState(infraAppState) && launchWorkloadJob) {
            launchAndMonitorWorkloadDriver(namenodeProperties.get());
        }
    });
    if (launchNameNode) {
        namenodeMonitoringThread.start();
    }
    while (true) {
        // Poll the RM once per second; an interrupt here merely shortens the
        // current sleep, which is harmless in this polling loop.
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            LOG.debug("Thread sleep in monitoring loop interrupted");
        }
        ApplicationReport report = yarnClient.getApplicationReport(infraAppId);
        if (report.getTrackingUrl() != null && !loggedApplicationInfo) {
            loggedApplicationInfo = true;
            LOG.info("Track the application at: " + report.getTrackingUrl());
            LOG.info("Kill the application using: yarn application -kill " + report.getApplicationId());
        }
        LOG.debug("Got application report from ASM for: appId={}, " + "clientToAMToken={}, appDiagnostics={}, appMasterHost={}, " + "appQueue={}, appMasterRpcPort={}, appStartTime={}, " + "yarnAppState={}, distributedFinalState={}, appTrackingUrl={}, " + "appUser={}", infraAppId.getId(), report.getClientToAMToken(), report.getDiagnostics(), report.getHost(), report.getQueue(), report.getRpcPort(), report.getStartTime(), report.getYarnApplicationState(), report.getFinalApplicationStatus(), report.getTrackingUrl(), report.getUser());
        infraAppState = report.getYarnApplicationState();
        if (infraAppState == YarnApplicationState.KILLED) {
            // KILLED is the expected terminal state; success depends on how
            // far the workload job got before the kill.
            if (!launchWorkloadJob) {
                success = true;
            } else if (workloadJob == null) {
                LOG.error("Infra app was killed before workload job was launched.");
            } else if (!workloadJob.isComplete()) {
                LOG.error("Infra app was killed before workload job completed.");
            } else if (workloadJob.isSuccessful()) {
                success = true;
            }
            LOG.info("Infra app was killed; exiting from client.");
            break;
        } else if (infraAppState == YarnApplicationState.FINISHED || infraAppState == YarnApplicationState.FAILED) {
            LOG.info("Infra app exited unexpectedly. YarnState=" + infraAppState.toString() + ". Exiting from client.");
            break;
        }
        if ((clientTimeout != -1) && (System.currentTimeMillis() > (clientStartTime + clientTimeout))) {
            LOG.info("Reached client specified timeout of {} ms for application. " + "Killing application", clientTimeout);
            attemptCleanup();
            break;
        }
        // Once the workload job finishes, tear down the infrastructure app.
        if (isCompleted(workloadAppState)) {
            LOG.info("Killing infrastructure app");
            try {
                forceKillApplication(infraAppId);
            } catch (YarnException | IOException e) {
                LOG.error("Exception encountered while killing infra app", e);
            }
        }
    }
    if (launchNameNode) {
        try {
            namenodeMonitoringThread.interrupt();
            namenodeMonitoringThread.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted while joining workload job thread; " + "continuing to cleanup.");
            // Restore the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
        }
    }
    attemptCleanup();
    return success;
}
282294.1829100hadoop
/**
 * Finalizes this builder and returns the assembled {@link ParsedJob}.
 * Attaches the parsed job-configuration properties (if any) and derives
 * attempt-runtime statistics from the map and reduce task attempts:
 * per-distance success/failure runtime CDFs for maps, success/failure
 * runtime CDFs for reduces, and the distribution of how many tries a
 * mapper needed to succeed.
 *
 * @return the finalized job object (the internal {@code result})
 */
public ParsedJob build() {
    finalized = true;
    if (jobConfigurationParameters != null) {
        result.setJobProperties(jobConfigurationParameters);
    }
    // One histogram per host-distance bucket, plus a trailing bucket for
    // attempts whose distance could not be determined.
    Histogram[] successfulMapAttemptTimes = new Histogram[ParsedHost.numberOfDistances() + 1];
    for (int i = 0; i < successfulMapAttemptTimes.length; ++i) {
        successfulMapAttemptTimes[i] = new Histogram();
    }
    Histogram successfulReduceAttemptTimes = new Histogram();
    Histogram[] failedMapAttemptTimes = new Histogram[ParsedHost.numberOfDistances() + 1];
    for (int i = 0; i < failedMapAttemptTimes.length; ++i) {
        failedMapAttemptTimes[i] = new Histogram();
    }
    Histogram failedReduceAttemptTimes = new Histogram();
    Histogram successfulNthMapperAttempts = new Histogram();
    for (LoggedTask task : result.getMapTasks()) {
        for (LoggedTaskAttempt attempt : task.getAttempts()) {
            // Default: the "unknown distance" bucket.
            int distance = successfulMapAttemptTimes.length - 1;
            Long runtime = null;
            // Only attempts with a valid start/finish timestamp pair
            // contribute to the runtime histograms.
            if (attempt.getFinishTime() > 0 && attempt.getStartTime() > 0) {
                runtime = attempt.getFinishTime() - attempt.getStartTime();
                if (attempt.getResult() == Values.SUCCESS) {
                    LoggedLocation host = attempt.getLocation();
                    List<LoggedLocation> locs = task.getPreferredLocations();
                    if (host != null && locs != null) {
                        for (LoggedLocation loc : locs) {
                            ParsedHost preferedLoc = new ParsedHost(loc);
                            distance = Math.min(distance, preferedLoc.distance(new ParsedHost(host)));
                        }
                    }
                    // runtime is guaranteed non-null here; the former
                    // redundant re-check of start/finish times was dead code.
                    successfulMapAttemptTimes[distance].enter(runtime);
                    TaskAttemptID attemptID = attempt.getAttemptID();
                    if (attemptID != null) {
                        // getId() is the attempt ordinal: records how many
                        // tries this mapper needed.
                        successfulNthMapperAttempts.enter(attemptID.getId());
                    }
                } else if (attempt.getResult() == Pre21JobHistoryConstants.Values.FAILED) {
                    failedMapAttemptTimes[distance].enter(runtime);
                }
            }
        }
    }
    for (LoggedTask task : result.getReduceTasks()) {
        for (LoggedTaskAttempt attempt : task.getAttempts()) {
            // BUG FIX: runtime was previously computed unconditionally
            // (yielding a meaningless or negative value when timestamps are
            // unset), then redundantly recomputed, and failed attempts
            // entered the unvalidated value into the histogram. Mirror the
            // map path instead: runtime stays null unless both timestamps
            // are valid, and nothing is entered without a valid runtime.
            Long runtime = null;
            if (attempt.getFinishTime() > 0 && attempt.getStartTime() > 0) {
                runtime = attempt.getFinishTime() - attempt.getStartTime();
            }
            if (runtime != null) {
                if (attempt.getResult() == Values.SUCCESS) {
                    successfulReduceAttemptTimes.enter(runtime);
                } else if (attempt.getResult() == Pre21JobHistoryConstants.Values.FAILED) {
                    failedReduceAttemptTimes.enter(runtime);
                }
            }
        }
    }
    result.setFailedMapAttemptCDFs(mapCDFArrayList(failedMapAttemptTimes));
    LoggedDiscreteCDF failedReduce = new LoggedDiscreteCDF();
    failedReduce.setCDF(failedReduceAttemptTimes, attemptTimesPercentiles, 100);
    result.setFailedReduceAttemptCDF(failedReduce);
    result.setSuccessfulMapAttemptCDFs(mapCDFArrayList(successfulMapAttemptTimes));
    LoggedDiscreteCDF succReduce = new LoggedDiscreteCDF();
    succReduce.setCDF(successfulReduceAttemptTimes, attemptTimesPercentiles, 100);
    result.setSuccessfulReduceAttemptCDF(succReduce);
    long totalSuccessfulAttempts = 0L;
    long maxTriesToSucceed = 0L;
    for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
        totalSuccessfulAttempts += ent.getValue();
        maxTriesToSucceed = Math.max(maxTriesToSucceed, ent.getKey());
    }
    if (totalSuccessfulAttempts > 0L) {
        // successAfterI[i] = fraction of successful mappers that succeeded
        // on their i-th attempt.
        double[] successAfterI = new double[(int) maxTriesToSucceed + 1];
        for (int i = 0; i < successAfterI.length; ++i) {
            successAfterI[i] = 0.0D;
        }
        for (Map.Entry<Long, Long> ent : successfulNthMapperAttempts) {
            successAfterI[ent.getKey().intValue()] = ((double) ent.getValue()) / totalSuccessfulAttempts;
        }
        result.setMapperTriesToSucceed(successAfterI);
    } else {
        result.setMapperTriesToSucceed(null);
    }
    return result;
}
283919.372136hadoop
/**
 * Prints the hadoop-streaming command-line usage to stdout.
 * <p>
 * Always prints the option summary and the generic-option usage. When
 * {@code detailed} is false it finishes with a pointer to {@code -info};
 * when true it additionally prints extended usage tips and examples.
 *
 * @param detailed whether to print the extended usage tips section
 */
private void printUsage(boolean detailed) {
    System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar" + " [options]");
    System.out.println("Options:");
    System.out.println("  -input          <path> DFS input file(s) for the Map" + " step.");
    System.out.println("  -output         <path> DFS output directory for the" + " Reduce step.");
    System.out.println("  -mapper         <cmd|JavaClassName> Optional. Command" + " to be run as mapper.");
    System.out.println("  -combiner       <cmd|JavaClassName> Optional. Command" + " to be run as combiner.");
    System.out.println("  -reducer        <cmd|JavaClassName> Optional. Command" + " to be run as reducer.");
    System.out.println("  -file           <file> Optional. File/dir to be " + "shipped in the Job jar file.\n" + "                  Deprecated. Use generic option \"-files\" instead.");
    System.out.println("  -inputformat    <TextInputFormat(default)" + "|SequenceFileAsTextInputFormat|JavaClassName>\n" + "                  Optional. The input format class.");
    System.out.println("  -outputformat   <TextOutputFormat(default)" + "|JavaClassName>\n" + "                  Optional. The output format class.");
    System.out.println("  -partitioner    <JavaClassName>  Optional. The" + " partitioner class.");
    System.out.println("  -numReduceTasks <num> Optional. Number of reduce " + "tasks.");
    System.out.println("  -inputreader    <spec> Optional. Input recordreader" + " spec.");
    System.out.println("  -cmdenv         <n>=<v> Optional. Pass env.var to" + " streaming commands.");
    System.out.println("  -mapdebug       <cmd> Optional. " + "To run this script when a map task fails.");
    System.out.println("  -reducedebug    <cmd> Optional." + " To run this script when a reduce task fails.");
    System.out.println("  -io             <identifier> Optional. Format to use" + " for input to and output");
    System.out.println("                  from mapper/reducer commands");
    System.out.println("  -lazyOutput     Optional. Lazily create Output.");
    System.out.println("  -background     Optional. Submit the job and don't wait till it completes.");
    System.out.println("  -verbose        Optional. Print verbose output.");
    System.out.println("  -info           Optional. Print detailed usage.");
    System.out.println("  -help           Optional. Print help message.");
    System.out.println();
    GenericOptionsParser.printGenericCommandUsage(System.out);
    // Short form: point the user at -info for the detailed section below.
    if (!detailed) {
        System.out.println();
        System.out.println("For more details about these options:");
        System.out.println("Use " + "$HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar -info");
        return;
    }
    System.out.println();
    System.out.println("Usage tips:");
    System.out.println("In -input: globbing on <path> is supported and can " + "have multiple -input");
    System.out.println();
    System.out.println("Default Map input format: a line is a record in UTF-8 " + "the key part ends at first");
    System.out.println("  TAB, the rest of the line is the value");
    System.out.println();
    System.out.println("To pass a Custom input format:");
    System.out.println("  -inputformat package.MyInputFormat");
    System.out.println();
    System.out.println("Similarly, to pass a custom output format:");
    System.out.println("  -outputformat package.MyOutputFormat");
    System.out.println();
    System.out.println("The files with extensions .class and .jar/.zip," + " specified for the -file");
    System.out.println("  argument[s], end up in \"classes\" and \"lib\" " + "directories respectively inside");
    System.out.println("  the working directory when the mapper and reducer are" + " run. All other files");
    System.out.println("  specified for the -file argument[s]" + " end up in the working directory when the");
    System.out.println("  mapper and reducer are run. The location of this " + "working directory is");
    System.out.println("  unspecified.");
    System.out.println();
    System.out.println("To set the number of reduce tasks (num. of output " + "files) as, say 10:");
    System.out.println("  Use -numReduceTasks 10");
    System.out.println("To skip the sort/combine/shuffle/sort/reduce step:");
    System.out.println("  Use -numReduceTasks 0");
    System.out.println("  Map output then becomes a 'side-effect " + "output' rather than a reduce input.");
    System.out.println("  This speeds up processing. This also feels " + "more like \"in-place\" processing");
    System.out.println("  because the input filename and the map " + "input order are preserved.");
    System.out.println("  This is equivalent to -reducer NONE");
    System.out.println();
    System.out.println("To speed up the last maps:");
    System.out.println("  -D " + MRJobConfig.MAP_SPECULATIVE + "=true");
    System.out.println("To speed up the last reduces:");
    System.out.println("  -D " + MRJobConfig.REDUCE_SPECULATIVE + "=true");
    System.out.println("To name the job (appears in the JobTracker Web UI):");
    System.out.println("  -D " + MRJobConfig.JOB_NAME + "='My Job'");
    System.out.println("To change the local temp directory:");
    System.out.println("  -D dfs.data.dir=/tmp/dfs");
    System.out.println("  -D stream.tmpdir=/tmp/streaming");
    System.out.println("Additional local temp directories with -jt local:");
    System.out.println("  -D " + MRConfig.LOCAL_DIR + "=/tmp/local");
    System.out.println("  -D " + JTConfig.JT_SYSTEM_DIR + "=/tmp/system");
    System.out.println("  -D " + MRConfig.TEMP_DIR + "=/tmp/temp");
    // BUG FIX: "SUCCEDED" -> "SUCCEEDED".
    System.out.println("To treat tasks with non-zero exit status as SUCCEEDED:");
    System.out.println("  -D stream.non.zero.exit.is.failure=false");
    System.out.println("Use a custom hadoop streaming build along with standard" + " hadoop install:");
    System.out.println("  $HADOOP_HOME/bin/hadoop jar " + "/path/my-hadoop-streaming.jar [...]\\");
    System.out.println("    [...] -D stream.shipped.hadoopstreaming=" + "/path/my-hadoop-streaming.jar");
    System.out.println("For more details about jobconf parameters see:");
    System.out.println("  http://wiki.apache.org/hadoop/JobConfFile");
    // BUG FIX: missing space between the concatenated fragments used to
    // print "copiedto the environment".
    System.out.println("Truncate the values of the job configuration copied " + "to the environment at the given length:");
    System.out.println("   -D stream.jobconf.truncate.limit=-1");
    System.out.println("To set an environment variable in a streaming " + "command:");
    System.out.println("   -cmdenv EXAMPLE_DIR=/home/example/dictionaries/");
    System.out.println();
    System.out.println("Shortcut:");
    System.out.println("   setenv HSTREAMING \"$HADOOP_HOME/bin/hadoop jar " + "hadoop-streaming.jar\"");
    System.out.println();
    System.out.println("Example: $HSTREAMING -mapper " + "\"/usr/local/bin/perl5 filter.pl\"");
    System.out.println("           -file /local/filter.pl -input " + "\"/logs/0604*/*\" [...]");
    System.out.println("  Ships a script, invokes the non-shipped perl " + "interpreter. Shipped files go to");
    System.out.println("  the working directory so filter.pl is found by perl. " + "Input files are all the");
    System.out.println("  daily logs for days in month 2006-04");
}
282328.381156hadoop
/**
 * Configures this configuration-vs-XML comparison test for yarn-default.xml:
 * points it at {@code YarnConfiguration}, enables strict missing-property
 * checks in both directions, and registers the property names and prefixes
 * to exclude from the comparison (security ACL keys, federation/router
 * internals, ZK settings used only for tests, templated/"fmt" keys, etc.).
 */
public void initializeMemberVariables() {
    xmlFilename = "yarn-default.xml";
    configurationClasses = new Class[] { YarnConfiguration.class };
    // Local aliases keep the long registration lists readable.
    HashSet<String> skipProps = new HashSet<String>();
    HashSet<String> skipPrefixes = new HashSet<String>();
    configurationPropsToSkipCompare = skipProps;
    configurationPrefixToSkipCompare = skipPrefixes;
    errorIfMissingConfigProps = true;
    errorIfMissingXmlProps = true;
    // Security service-authorization keys have no xml defaults.
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONCLIENT_PROTOCOL);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCEMANAGER_ADMINISTRATION_PROTOCOL);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL);
    skipProps.add(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_NODEMANAGER_PROTOCOL);
    skipProps.add(YarnConfiguration.CURATOR_LEADER_ELECTOR);
    skipProps.add(YarnConfiguration.RM_RESERVATION_SYSTEM_MAX_PERIODICITY);
    // Federation / router internals, not documented in yarn-default.xml.
    skipProps.add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
    skipProps.add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
    skipProps.add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
    skipPrefixes.add(YarnConfiguration.FEDERATION_FLUSH_CACHE_FOR_RM_ADDR);
    skipProps.add(YarnConfiguration.RM_EPOCH);
    skipProps.add(YarnConfiguration.ROUTER_CLIENTRM_ADDRESS);
    skipProps.add(YarnConfiguration.ROUTER_RMADMIN_ADDRESS);
    skipProps.add(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS);
    skipProps.add(YarnConfiguration.FEDERATION_POLICY_MANAGER);
    skipProps.add(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS);
    skipProps.add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_KEY);
    skipProps.add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER);
    skipProps.add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
    skipProps.add(YarnConfiguration.FEDERATION_AMRMPROXY_HB_MAX_WAIT_MS);
    skipProps.add(YarnConfiguration.FEDERATION_AMRMPROXY_SUBCLUSTER_TIMEOUT);
    skipProps.add(YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH);
    skipProps.add(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS);
    skipProps.add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_SQL_JDBC_CLASS);
    skipProps.add(YarnConfiguration.FEDERATION_STATESTORE_SQL_USERNAME);
    skipProps.add(YarnConfiguration.FEDERATION_STATESTORE_SQL_PASSWORD);
    skipProps.add(YarnConfiguration.FEDERATION_STATESTORE_SQL_URL);
    skipProps.add(YarnConfiguration.FEDERATION_STATESTORE_SQL_MAXCONNECTIONS);
    skipProps.add(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_ENABLED);
    skipProps.add(YarnConfiguration.AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD);
    skipPrefixes.add("yarn.timeline-service.");
    skipProps.add(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED);
    // ZK settings covered by the generic curator configuration.
    skipProps.add(YarnConfiguration.RM_ZK_ADDRESS);
    skipProps.add(YarnConfiguration.RM_ZK_NUM_RETRIES);
    skipProps.add(YarnConfiguration.RM_ZK_TIMEOUT_MS);
    skipProps.add(YarnConfiguration.RM_ZK_RETRY_INTERVAL_MS);
    skipProps.add(YarnConfiguration.RM_ZK_AUTH);
    skipProps.add(YarnConfiguration.RM_ZK_ACL);
    skipPrefixes.add("yarn.app.container");
    skipProps.add(YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE);
    skipProps.add(YarnConfiguration.HADOOP_HTTP_WEBAPP_SCHEDULER_PAGE);
    skipPrefixes.add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED);
    skipPrefixes.add(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE);
    skipPrefixes.add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT);
    skipPrefixes.add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT);
    skipPrefixes.add(YarnConfiguration.NM_DISK_RESOURCE_ENABLED);
    skipPrefixes.add(YarnConfiguration.NM_CPU_RESOURCE_ENABLED);
    skipPrefixes.add(YarnConfiguration.NM_NETWORK_TAG_MAPPING_MANAGER);
    skipPrefixes.add(YarnConfiguration.NM_NETWORK_TAG_MAPPING_FILE_PATH);
    skipPrefixes.add(YarnConfiguration.NM_NETWORK_TAG_PREFIX);
    skipPrefixes.add(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY);
    skipPrefixes.add(YarnConfiguration.ROUTER_CLIENTRM_PARTIAL_RESULTS_ENABLED);
    skipPrefixes.add(YarnConfiguration.ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED);
    skipPrefixes.add(YarnConfiguration.ROUTER_WEBAPP_CONNECT_TIMEOUT);
    skipPrefixes.add(YarnConfiguration.ROUTER_WEBAPP_READ_TIMEOUT);
    skipPrefixes.add(YarnConfiguration.NM_USER_HOME_DIR);
    skipPrefixes.add(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS);
    skipPrefixes.add(YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER);
    // Entries present in the XML but not backed by a Configuration constant.
    HashSet<String> skipXmlProps = new HashSet<String>();
    HashSet<String> skipXmlPrefixes = new HashSet<String>();
    xmlPropsToSkipCompare = skipXmlProps;
    xmlPrefixToSkipCompare = skipXmlPrefixes;
    skipXmlProps.add("yarn.nodemanager.aux-services.mapreduce_shuffle.class");
    skipXmlProps.add("yarn.resourcemanager.container.liveness-monitor.interval-ms");
    skipXmlProps.add("yarn.nodemanager.hostname");
    skipXmlPrefixes.add("yarn.timeline-service");
    skipXmlPrefixes.add("hadoop.registry");
    skipXmlPrefixes.add("yarn.log-aggregation.file-controller.TFile.class");
    initDefaultValueCollisionCheck();
    // Template/"fmt" keys contain %s placeholders and never appear verbatim
    // in the xml file.
    skipProps.add(YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT);
    skipProps.add(YarnConfiguration.LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT);
    skipProps.add(YarnConfiguration.LOG_AGGREGATION_FILE_CONTROLLER_FMT);
    skipProps.add(YarnConfiguration.NM_AUX_SERVICE_FMT);
    skipProps.add(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS_TEMPLATE);
    skipProps.add(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_OPTS_TEMPLATE);
    skipProps.add(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_PATH_TEMPLATE);
    skipProps.add(YarnConfiguration.NM_HEALTH_CHECK_SCRIPT_INTERVAL_MS_TEMPLATE);
    skipProps.add(YarnConfiguration.NM_AUX_SERVICE_REMOTE_CLASSPATH);
    skipProps.add(YarnConfiguration.LINUX_CONTAINER_RUNTIME_CLASS_FMT);
}
282445.8522117hadoop
/**
 * Renders the aggregated container logs for the requested container into
 * the given HTML block.
 * <p>
 * Resolves the remote aggregated-log files for the application, loads each
 * file's indexed log metadata (bounded by its checksum end offset when one
 * exists), enforces the application's view ACLs, filters the per-file log
 * metas down to the requested container (and optionally a single log type),
 * and streams the matching logs into the page. Errors are reported inline
 * as page headings.
 *
 * @param html the HTML block to render into
 */
protected void render(Block html) {
    BlockParameters params = verifyAndParseParameters(html);
    if (params == null) {
        // verifyAndParseParameters already rendered the error.
        return;
    }
    ApplicationId appId = params.getAppId();
    ContainerId containerId = params.getContainerId();
    NodeId nodeId = params.getNodeId();
    String appOwner = params.getAppOwner();
    String logEntity = params.getLogEntity();
    long start = params.getStartIndex();
    long end = params.getEndIndex();
    long startTime = params.getStartTime();
    long endTime = params.getEndTime();
    List<FileStatus> nodeFiles = null;
    try {
        nodeFiles = LogAggregationUtils.getRemoteNodeFileList(conf, appId, appOwner, this.fileController.getRemoteRootLogDir(), this.fileController.getRemoteRootLogDirSuffix());
    } catch (Exception ex) {
        html.h1("Unable to locate any logs for container " + containerId.toString());
        // BUG FIX: was LOG.error(ex.getMessage()), which dropped the stack
        // trace (and logged "null" for message-less exceptions).
        LOG.error("Unable to locate any logs for container " + containerId, ex);
        return;
    }
    Map<String, Long> checkSumFiles;
    try {
        checkSumFiles = fileController.parseCheckSumFiles(nodeFiles);
    } catch (IOException ex) {
        LOG.error("Error getting logs for " + logEntity, ex);
        html.h1("Error getting logs for " + logEntity);
        return;
    }
    List<FileStatus> fileToRead;
    try {
        fileToRead = fileController.getNodeLogFileToRead(nodeFiles, nodeId.toString(), appId);
    } catch (IOException ex) {
        LOG.error("Error getting logs for " + logEntity, ex);
        html.h1("Error getting logs for " + logEntity);
        return;
    }
    boolean foundLog = false;
    String desiredLogType = $(CONTAINER_LOG_TYPE);
    try {
        for (FileStatus thisNodeFile : fileToRead) {
            // A checksum file records where valid data ends in an
            // in-progress aggregation file; -1 means read to EOF.
            Long checkSumIndex = checkSumFiles.get(thisNodeFile.getPath().getName());
            long endIndex = -1;
            if (checkSumIndex != null) {
                endIndex = checkSumIndex.longValue();
            }
            IndexedLogsMeta indexedLogsMeta = null;
            try {
                indexedLogsMeta = fileController.loadIndexedLogsMeta(thisNodeFile.getPath(), endIndex, appId);
            } catch (Exception ex) {
                // BUG FIX: include the exception so the failure cause is
                // preserved, not just the file name.
                LOG.warn("Can not load log meta from the log file:" + thisNodeFile.getPath(), ex);
                continue;
            }
            if (indexedLogsMeta == null) {
                continue;
            }
            Map<ApplicationAccessType, String> appAcls = indexedLogsMeta.getAcls();
            String user = indexedLogsMeta.getUser();
            String remoteUser = request().getRemoteUser();
            if (!checkAcls(conf, appId, user, appAcls, remoteUser)) {
                html.h1().__("User [" + remoteUser + "] is not authorized to view the logs for " + logEntity + " in log file [" + thisNodeFile.getPath().getName() + "]").__();
                LOG.error("User [" + remoteUser + "] is not authorized to view the logs for " + logEntity);
                continue;
            }
            String compressAlgo = indexedLogsMeta.getCompressName();
            // Collect the log metas belonging to the requested container,
            // optionally restricted to a single log-file name.
            List<IndexedFileLogMeta> candidates = new ArrayList<>();
            for (IndexedPerAggregationLogMeta logMeta : indexedLogsMeta.getLogMetas()) {
                for (Entry<String, List<IndexedFileLogMeta>> meta : logMeta.getLogMetas().entrySet()) {
                    for (IndexedFileLogMeta log : meta.getValue()) {
                        if (!log.getContainerId().equals(containerId.toString())) {
                            continue;
                        }
                        if (desiredLogType != null && !desiredLogType.isEmpty() && !desiredLogType.equals(log.getFileName())) {
                            continue;
                        }
                        candidates.add(log);
                    }
                }
            }
            if (candidates.isEmpty()) {
                continue;
            }
            foundLog = readContainerLog(compressAlgo, html, thisNodeFile, start, end, candidates, startTime, endTime, foundLog, logEntity);
        }
        if (!foundLog) {
            // BUG FIX: desiredLogType may be null (the filter above treats
            // null and empty alike); the bare isEmpty() call could NPE here.
            if (desiredLogType == null || desiredLogType.isEmpty()) {
                html.h1("No logs available for container " + containerId.toString());
            } else {
                html.h1("Unable to locate '" + desiredLogType + "' log for container " + containerId.toString());
            }
        }
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception ex) {
        html.h1().__("Error getting logs for " + logEntity).__();
        LOG.error("Error getting logs for " + logEntity, ex);
    }
}
283838.071135hadoop
/**
 * Verifies NodeManager application recovery across restarts with a shared
 * in-memory state store:
 * <ol>
 *   <li>start a container, then restart the container manager and check the
 *       application, its log-aggregation context, and its view/modify ACLs
 *       are all recovered;</li>
 *   <li>finish the application, restart again, and check the finish event
 *       plus ACLs survive recovery;</li>
 *   <li>complete resource cleanup and log handling, restart once more, and
 *       check the application is no longer recovered.</li>
 * </ol>
 *
 * @throws Exception on any test failure or setup error
 */
public void testApplicationRecovery() throws Exception {
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true);
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "yarn_admin_user");
    // The memory state store survives container-manager restarts within
    // this test, simulating NM work-preserving recovery.
    NMStateStoreService stateStore = new NMMemoryStateStoreService();
    stateStore.init(conf);
    stateStore.start();
    Context context = createContext(conf, stateStore);
    ContainerManagerImpl cm = createContainerManager(context);
    cm.init(conf);
    cm.start();
    String appName = "app_name1";
    String appUser = "app_user1";
    String modUser = "modify_user1";
    String viewUser = "view_user1";
    String enemyUser = "enemy_user";
    ApplicationId appId = ApplicationId.newInstance(0, 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId cid = ContainerId.newContainerId(attemptId, 1);
    Map<String, LocalResource> localResources = Collections.emptyMap();
    Map<String, String> containerEnv = new HashMap<>();
    setFlowContext(containerEnv, appName, appId);
    List<String> containerCmds = Collections.emptyList();
    Map<String, ByteBuffer> serviceData = Collections.emptyMap();
    Credentials containerCreds = new Credentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    containerCreds.writeTokenStorageToStream(dob);
    ByteBuffer containerTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>();
    acls.put(ApplicationAccessType.MODIFY_APP, modUser);
    acls.put(ApplicationAccessType.VIEW_APP, viewUser);
    ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, containerEnv, containerCmds, serviceData, containerTokens, acls);
    LogAggregationContext logAggregationContext = LogAggregationContext.newInstance("includePattern", "excludePattern", "includePatternInRollingAggregation", "excludePatternInRollingAggregation");
    StartContainersResponse startResponse = startContainer(context, cm, cid, clc, logAggregationContext, ContainerType.TASK);
    assertTrue(startResponse.getFailedRequests().isEmpty());
    assertEquals(1, context.getApplications().size());
    Application app = context.getApplications().get(appId);
    assertNotNull(app);
    waitForAppState(app, ApplicationState.INITING);
    // Baseline ACL checks before any restart.
    assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser), ApplicationAccessType.MODIFY_APP, appUser, appId));
    assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.MODIFY_APP, appUser, appId));
    assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.VIEW_APP, appUser, appId));
    assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser), ApplicationAccessType.VIEW_APP, appUser, appId));
    // Restart #1: the running application must be recovered intact.
    cm.stop();
    context = createContext(conf, stateStore);
    cm = createContainerManager(context);
    cm.init(conf);
    cm.start();
    assertEquals(1, context.getApplications().size());
    app = context.getApplications().get(appId);
    assertNotNull(app);
    LogAggregationContext recovered = ((ApplicationImpl) app).getLogAggregationContext();
    assertNotNull(recovered);
    assertEquals(logAggregationContext.getIncludePattern(), recovered.getIncludePattern());
    assertEquals(logAggregationContext.getExcludePattern(), recovered.getExcludePattern());
    assertEquals(logAggregationContext.getRolledLogsIncludePattern(), recovered.getRolledLogsIncludePattern());
    assertEquals(logAggregationContext.getRolledLogsExcludePattern(), recovered.getRolledLogsExcludePattern());
    waitForAppState(app, ApplicationState.INITING);
    assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser), ApplicationAccessType.MODIFY_APP, appUser, appId));
    assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.MODIFY_APP, appUser, appId));
    assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.VIEW_APP, appUser, appId));
    assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser), ApplicationAccessType.VIEW_APP, appUser, appId));
    // (Removed an unused local list of finished app IDs that was built here
    // but never consumed.)
    app.handle(new ApplicationFinishEvent(appId, "Application killed by ResourceManager"));
    waitForAppState(app, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
    // Restart #2: the finishing application must still be recovered, and
    // re-delivering the finish event must be harmless.
    cm.stop();
    context = createContext(conf, stateStore);
    cm = createContainerManager(context);
    cm.init(conf);
    cm.start();
    assertEquals(1, context.getApplications().size());
    app = context.getApplications().get(appId);
    assertNotNull(app);
    app.handle(new ApplicationFinishEvent(appId, "Application killed by ResourceManager"));
    waitForAppState(app, ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
    assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser), ApplicationAccessType.MODIFY_APP, appUser, appId));
    assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.MODIFY_APP, appUser, appId));
    assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser), ApplicationAccessType.VIEW_APP, appUser, appId));
    assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser), ApplicationAccessType.VIEW_APP, appUser, appId));
    app.handle(new ApplicationEvent(app.getAppId(), ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP));
    assertThat(app.getApplicationState()).isEqualTo(ApplicationState.FINISHED);
    app.handle(new ApplicationEvent(app.getAppId(), ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
    // Restart #3: a fully-finished application must NOT be recovered.
    cm.stop();
    context = createContext(conf, stateStore);
    cm = createContainerManager(context);
    cm.init(conf);
    cm.start();
    assertTrue(context.getApplications().isEmpty());
    cm.stop();
}
282327.8621114hadoop
/**
 * Tries to satisfy one pending ask on the given node: returns an ALLOCATED
 * result when the node can host the container now (possibly after
 * unreserving or killing other containers), a RESERVED result when the app
 * may hold a reservation on the node, or LOCALITY_SKIPPED otherwise.
 *
 * @param clusterResource total cluster resource, used for resource math
 * @param node            the node being scheduled
 * @param schedulerKey    priority/allocation-id key of the pending ask
 * @param pendingAsk      the outstanding request (per-allocation resource)
 * @param type            locality type of this attempt (node/rack/off-switch)
 * @param rmContainer     existing reserved container on this node, or null
 * @param schedulingMode  partition exclusivity mode (logged/diagnostics only here)
 * @param currentResourceLimits queue limits, incl. headroom and amount that
 *                              must be unreserved before allocating
 * @return the allocation decision; never null
 */
private ContainerAllocation assignContainer(Resource clusterResource, FiCaSchedulerNode node, SchedulerRequestKey schedulerKey, PendingAsk pendingAsk, NodeType type, RMContainer rmContainer, SchedulingMode schedulingMode, ResourceLimits currentResourceLimits) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("assignContainers: node=" + node.getNodeName() + " application=" + application.getApplicationId() + " priority=" + schedulerKey.getPriority() + " pendingAsk=" + pendingAsk + " type=" + type);
    }
    Resource capability = pendingAsk.getPerAllocationResource();
    Resource available = node.getUnallocatedResource();
    Resource totalResource = node.getTotalResource();
    // Hard gate: if the ask exceeds the node's TOTAL capacity it can never
    // fit here, regardless of current usage — skip without reserving.
    if (!Resources.fitsIn(rc, capability, totalResource)) {
        LOG.warn("Node : " + node.getNodeID() + " does not have sufficient resource for ask : " + pendingAsk + " node total capability : " + node.getTotalResource());
        ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.NODE_TOTAL_RESOURCE_INSUFFICIENT_FOR_REQUEST + getResourceDiagnostics(capability, totalResource), ActivityLevel.NODE);
        return ContainerAllocation.LOCALITY_SKIPPED;
    }
    boolean shouldAllocOrReserveNewContainer = shouldAllocOrReserveNewContainer(schedulerKey, capability);
    // > 0 means the node's currently free resource can host at least one
    // container of this size right now.
    long availableContainers = rc.computeAvailableContainers(available, capability);
    // Resource snapshot used only for diagnostics ("DC") messages below;
    // may later be widened to include killable containers.
    Resource availableForDC = available;
    // How much the app must give back (unreserve) before this allocation is
    // legal: the larger of the headroom shortfall and the queue-mandated
    // unreserve amount.
    Resource resourceNeedToUnReserve = Resources.max(rc, clusterResource, Resources.subtract(capability, currentResourceLimits.getHeadroom()), currentResourceLimits.getAmountNeededUnreserve());
    boolean needToUnreserve = rc.isAnyMajorResourceAboveZero(resourceNeedToUnReserve);
    RMContainer unreservedContainer = null;
    boolean reservationsContinueLooking = application.getCSLeafQueue().isReservationsContinueLooking();
    List<RMContainer> toKillContainers = null;
    // Preemption path: if nothing fits now but preemption is allowed,
    // accumulate killable containers on this node until the ask would fit.
    // Note the list keeps every container inspected up to (and including)
    // the one that made it fit.
    if (availableContainers == 0 && currentResourceLimits.isAllowPreemption()) {
        Resource availableAndKillable = Resources.clone(available);
        for (RMContainer killableContainer : node.getKillableContainers().values()) {
            if (null == toKillContainers) {
                toKillContainers = new ArrayList<>();
            }
            toKillContainers.add(killableContainer);
            Resources.addTo(availableAndKillable, killableContainer.getAllocatedResource());
            if (Resources.fitsIn(rc, capability, availableAndKillable)) {
                // Treat the node as able to host exactly one container.
                availableContainers = 1;
                break;
            }
        }
        availableForDC = availableAndKillable;
    }
    if (availableContainers > 0) {
        // Allocating a NEW container (no existing reservation) under the
        // continue-looking policy may first require unreserving one of the
        // app's reservations elsewhere.
        if (rmContainer == null && reservationsContinueLooking) {
            if (!shouldAllocOrReserveNewContainer || needToUnreserve) {
                if (!needToUnreserve) {
                    // Over the reservation limit rather than over headroom:
                    // give back at least one container of this size.
                    resourceNeedToUnReserve = capability;
                }
                unreservedContainer = application.findNodeToUnreserve(node, schedulerKey, resourceNeedToUnReserve);
                // No suitable reservation to release — cannot allocate here.
                if (null == unreservedContainer) {
                    ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.NODE_CAN_NOT_FIND_CONTAINER_TO_BE_UNRESERVED_WHEN_NEEDED, ActivityLevel.NODE);
                    return ContainerAllocation.LOCALITY_SKIPPED;
                }
            }
        }
        ContainerAllocation result = new ContainerAllocation(unreservedContainer, pendingAsk.getPerAllocationResource(), AllocationState.ALLOCATED);
        result.containerNodeType = type;
        result.setToKillContainers(toKillContainers);
        return result;
    } else {
        // Node cannot host the container now: either reserve (or re-reserve)
        // it, or skip this node entirely.
        if (shouldAllocOrReserveNewContainer || rmContainer != null) {
            if (reservationsContinueLooking && rmContainer == null) {
                if (needToUnreserve) {
                    // Under continue-looking, don't create a fresh reservation
                    // while the app still owes an unreserve — keep looking.
                    LOG.debug("we needed to unreserve to be able to allocate");
                    ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.NODE_DO_NOT_HAVE_SUFFICIENT_RESOURCE + getResourceDiagnostics(capability, availableForDC), ActivityLevel.NODE);
                    return ContainerAllocation.LOCALITY_SKIPPED;
                }
            }
            ActivitiesLogger.APP.recordAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.NODE_DO_NOT_HAVE_SUFFICIENT_RESOURCE + getResourceDiagnostics(capability, availableForDC), rmContainer == null ? ActivityState.RESERVED : ActivityState.RE_RESERVED, ActivityLevel.NODE);
            ContainerAllocation result = new ContainerAllocation(null, pendingAsk.getPerAllocationResource(), AllocationState.RESERVED);
            result.containerNodeType = type;
            result.setToKillContainers(null);
            return result;
        }
        ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.NODE_DO_NOT_HAVE_SUFFICIENT_RESOURCE + getResourceDiagnostics(capability, availableForDC), ActivityLevel.NODE);
        return ContainerAllocation.LOCALITY_SKIPPED;
    }
}
282721.4718115hadoop
/**
 * Runs the ZKRMStateStore load benchmark: stores {@code numApp}
 * applications, each with {@code numAppAttemptPerApp} attempts, times a
 * single {@code loadState()} call, prints the elapsed milliseconds, and
 * finally removes the application znodes it created.
 *
 * <p>Recognized flags (each requires a value):
 * {@code -appsize N}, {@code -appattemptsize N},
 * {@code -hostPort host:port} (use an external ZooKeeper instead of an
 * embedded one), {@code -workingZnode path}.
 *
 * @param args command-line flag/value pairs as above
 * @return 0 on success, -1 on bad arguments or any store failure
 */
public int run(String[] args) {
    LOG.info("Starting ZKRMStateStorePerf ver." + version);
    int numApp = ZK_PERF_NUM_APP_DEFAULT;
    int numAppAttemptPerApp = ZK_PERF_NUM_APPATTEMPT_PER_APP;
    String hostPort = null;
    boolean launchLocalZK = true;
    if (args.length == 0) {
        System.err.println("Missing arguments.");
        return -1;
    }
    // Every flag consumes exactly one value; validate the flag and the
    // presence of its value up front instead of indexing past the end of
    // args (which previously threw ArrayIndexOutOfBoundsException).
    for (int i = 0; i < args.length; i += 2) {
        String flag = args[i];
        boolean known = flag.equalsIgnoreCase("-appsize")
            || flag.equalsIgnoreCase("-appattemptsize")
            || flag.equalsIgnoreCase("-hostPort")
            || flag.equalsIgnoreCase("-workingZnode");
        if (!known) {
            System.err.println("Illegal argument: " + flag);
            return -1;
        }
        if (i + 1 >= args.length) {
            System.err.println("Missing value for " + flag);
            return -1;
        }
        String value = args[i + 1];
        if (flag.equalsIgnoreCase("-appsize")) {
            numApp = Integer.parseInt(value);
        } else if (flag.equalsIgnoreCase("-appattemptsize")) {
            numAppAttemptPerApp = Integer.parseInt(value);
        } else if (flag.equalsIgnoreCase("-hostPort")) {
            hostPort = value;
            launchLocalZK = false;
        } else {
            workingZnode = value;
        }
    }
    // Spin up an embedded ZooKeeper unless the caller pointed us at one.
    if (launchLocalZK) {
        try {
            setUpZKServer();
        } catch (Exception e) {
            System.err.println("failed to setup. : " + e.getMessage());
            return -1;
        }
    }
    initStore(hostPort);
    long submitTime = System.currentTimeMillis();
    long startTime = System.currentTimeMillis() + 1234;
    ArrayList<ApplicationId> applicationIds = new ArrayList<>();
    ArrayList<RMApp> rmApps = new ArrayList<>();
    ArrayList<ApplicationAttemptId> attemptIds = new ArrayList<>();
    HashMap<ApplicationId, Set<ApplicationAttemptId>> appIdsToAttemptId = new HashMap<>();
    TestDispatcher dispatcher = new TestDispatcher();
    store.setRMDispatcher(dispatcher);
    // Pre-generate all app and attempt ids so the store phases below are
    // pure store traffic.
    for (int i = 0; i < numApp; i++) {
        ApplicationId appId = ApplicationId.newInstance(clusterTimeStamp, i);
        applicationIds.add(appId);
        ArrayList<ApplicationAttemptId> attemptIdsForThisApp = new ArrayList<>();
        for (int j = 0; j < numAppAttemptPerApp; j++) {
            ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, j);
            attemptIdsForThisApp.add(attemptId);
        }
        appIdsToAttemptId.put(appId, new LinkedHashSet<>(attemptIdsForThisApp));
        attemptIds.addAll(attemptIdsForThisApp);
    }
    // Phase 1: create one application znode per app, waiting for the store
    // to acknowledge each through the dispatcher.
    for (ApplicationId appId : applicationIds) {
        RMApp app = null;
        try {
            app = storeApp(store, appId, submitTime, startTime);
        } catch (Exception e) {
            System.err.println("failed to create Application Znode. : " + e.getMessage());
            return -1;
        }
        waitNotify(dispatcher);
        rmApps.add(app);
    }
    // Phase 2: create one attempt znode per attempt, each with fresh tokens.
    for (ApplicationAttemptId attemptId : attemptIds) {
        Token<AMRMTokenIdentifier> tokenId = generateAMRMToken(attemptId, appTokenMgr);
        SecretKey clientTokenKey = clientToAMTokenMgr.createMasterKey(attemptId);
        try {
            storeAttempt(store, attemptId, ContainerId.newContainerId(attemptId, 0L).toString(), tokenId, clientTokenKey, dispatcher);
        } catch (Exception e) {
            System.err.println("failed to create AppAttempt Znode. : " + e.getMessage());
            return -1;
        }
    }
    // The measurement itself: how long does loadState() take with this
    // many znodes?
    long storeStart = System.currentTimeMillis();
    try {
        store.loadState();
    } catch (Exception e) {
        System.err.println("failed to loadState from ZKRMStateStore. : " + e.getMessage());
        return -1;
    }
    long storeEnd = System.currentTimeMillis();
    long loadTime = storeEnd - storeStart;
    String resultMsg = "ZKRMStateStore takes " + loadTime + " msec to loadState.";
    LOG.info(resultMsg);
    System.out.println(resultMsg);
    // Cleanup: remove every application state we created. A raw mocked Map
    // is used because removeApplicationStateInternal only consults keySet().
    try {
        for (RMApp app : rmApps) {
            ApplicationStateData appState = ApplicationStateData.newInstance(app.getSubmitTime(), app.getStartTime(), app.getApplicationSubmissionContext(), app.getUser());
            ApplicationId appId = app.getApplicationId();
            Map m = mock(Map.class);
            when(m.keySet()).thenReturn(appIdsToAttemptId.get(appId));
            appState.attempts = m;
            store.removeApplicationStateInternal(appState);
        }
    } catch (Exception e) {
        System.err.println("failed to cleanup. : " + e.getMessage());
        return -1;
    }
    return 0;
}
284726.811119hadoop
/**
 * Verifies that one user's large reservation (user_0 reserving 8GB on
 * node_0) does not block a different user (user_1) from getting regular
 * allocations on other nodes of the same leaf queue.
 */
public void testReservationLimitOtherUsers() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf, true);
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Two apps from two DIFFERENT users in the same queue.
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_1, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_1.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_1, user_1);
    // Three identical 8GB nodes.
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    String host_2 = "host_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 8 * GB);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getNode(node_2.getNodeID())).thenReturn(node_2);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    cs.getNodeTracker().addNode(node_0);
    cs.getNodeTracker().addNode(node_1);
    cs.getNodeTracker().addNode(node_2);
    final int numNodes = 3;
    // 24GB total cluster resource.
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    // Each app asks for one 2GB AM container.
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    // Schedule on node_0: app_0's 2GB AM lands there.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(22 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Schedule on node_1: app_1's 2GB AM lands there.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(4 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(4 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(20 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(2 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // app_0 now asks for two 8GB maps (won't fit on any node alongside an
    // AM), app_1 asks for two 2GB maps.
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 8 * GB, 2, true, priorityMap, recordFactory)));
    app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 2, true, priorityMap, recordFactory)));
    // node_0 only has 6GB free, so app_0's 8GB ask becomes a reservation:
    // used grows to 12GB but allocated stays at 4GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(12 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(8 * GB, a.getMetrics().getReservedMB());
    assertEquals(4 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(12 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(2 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
    // Key check: despite user_0's 8GB reservation, user_1 still gets a
    // normal 2GB allocation on node_1.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(14 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(4 * GB, app_1.getCurrentConsumption().getMemorySize());
    assertEquals(8 * GB, a.getMetrics().getReservedMB());
    assertEquals(6 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(10 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(4 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize());
}
284841.731126hadoop
/**
 * Verifies that when an app is out of headroom with an outstanding
 * reservation, a later assignment on another node unreserves the pending
 * 5GB reservation and converts it into a real allocation there.
 */
public void testAssignContainersNeedToUnreserve() throws Exception {
    GenericTestUtils.setRootLogLevel(Level.DEBUG);
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf);
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Two apps from the same user; only app_0 submits requests below.
    final String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    a.submitApplicationAttempt(app_1, user_0);
    // Two 8GB nodes, 16GB cluster.
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1);
    cs.getNodeTracker().addNode(node_0);
    cs.getNodeTracker().addNode(node_1);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    // app_0's asks: one 2GB AM, two 5GB reduces, two 3GB maps.
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, priorityReduce, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory)));
    // Assignment 1 (node_0): 2GB AM allocated.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(14 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Assignment 2 (node_0): first 3GB map allocated (total 5GB on node_0).
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(11 * GB, a.getMetrics().getAvailableMB());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Assignment 3 (node_1): second 3GB map allocated; headroom now 8GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(8 * GB, a.getMetrics().getAvailableMB());
    assertEquals(8 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(2, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Assignment 4 (node_0): node_0 has only 3GB free, so the 5GB reduce
    // is RESERVED there; headroom drops to 3GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, a.getMetrics().getAvailableMB());
    assertEquals(3 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(2, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
    // Assignment 5 (node_1): allocating the 5GB reduce on node_1 requires
    // unreserving node_0's reservation first — reserved drops to 0 and the
    // container is allocated, leaving one reduce ask outstanding.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, a.getMetrics().getAvailableMB());
    assertEquals(3 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize());
    assertEquals(1, app_0.getOutstandingAsksCount(toSchedulerKey(priorityReduce)));
}
284586.181122hadoop
/**
 * Verifies {@code LeafQueue.canAssignToThisQueue}: with reservations
 * counted as "continue looking" the queue may still assign at its limit
 * (reserved space can be released), but once the continue-looking policy
 * is switched off the same check fails.
 */
public void testAssignToQueue() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf);
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Two apps from the same user; only app_0 submits requests below.
    final String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    a.submitApplicationAttempt(app_1, user_0);
    // Three 8GB nodes are mocked, but numNodes below is 2 — the cluster
    // resource is sized as 16GB and node_2 is never scheduled on.
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    String host_2 = "host_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 8 * GB);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getNode(node_2.getNodeID())).thenReturn(node_2);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    // app_0's asks: one 2GB AM, two 5GB reduces, two 3GB maps.
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, priorityReduce, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory)));
    // Assignment 1 (node_0): 2GB AM allocated.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(14 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Assignment 2 (node_0): first 3GB map allocated (node_0 at 5GB).
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(11 * GB, a.getMetrics().getAvailableMB());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Assignment 3 (node_1): second 3GB map allocated.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(8 * GB, a.getMetrics().getAvailableMB());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    // Assignment 4 (node_0): only 3GB free, so the 5GB reduce is RESERVED;
    // queue usage reaches 13GB (8 allocated + 5 reserved).
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, a.getMetrics().getAvailableMB());
    assertEquals(3 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    // With continue-looking ON: the queue is exactly at its 13GB limit but
    // the check passes (reserved 5GB could be released), headroom is 0.
    ResourceLimits limits = new ResourceLimits(Resources.createResource(13 * GB));
    boolean res = a.canAssignToThisQueue(Resources.createResource(13 * GB), RMNodeLabelsManager.NO_LABEL, limits, Resources.createResource(3 * GB), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertTrue(res);
    assertEquals(0, limits.getHeadroom().getMemorySize());
    // With continue-looking OFF: the identical check now fails.
    refreshQueuesTurnOffReservationsContLook(a, csConf);
    limits = new ResourceLimits(Resources.createResource(13 * GB));
    res = a.canAssignToThisQueue(Resources.createResource(13 * GB), RMNodeLabelsManager.NO_LABEL, limits, Resources.createResource(3 * GB), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertFalse(res);
}
284834.251122hadoop
/**
 * Verifies {@code LeafQueue#canAssignToUser}: fills queue A with allocations
 * and a 5GB reservation, all owned by a single user, then checks that the
 * user-limit decision (a) accepts when the limit covers current usage,
 * (b) accepts-with-unreserve when usage only fits after releasing the
 * reservation (reporting the amount needed to unreserve), and (c) rejects
 * outright once reservations-continue-looking is disabled.
 */
public void testAssignToUser() throws Exception {
    // Queue under test: stubbed leaf queue A from the standard test hierarchy.
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    setup(csConf);
    LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
    // Two application attempts, both owned by user_0 so they share one user limit.
    final String user_0 = "user_0";
    final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_0 = spy(app_0);
    // Silence AM diagnostics updates; they are irrelevant to this test.
    Mockito.doNothing().when(app_0).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    rmContext.getRMApps().put(app_0.getApplicationId(), mock(RMApp.class));
    a.submitApplicationAttempt(app_0, user_0);
    final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
    app_1 = spy(app_1);
    Mockito.doNothing().when(app_1).updateAMContainerDiagnostics(any(AMState.class), any(String.class));
    a.submitApplicationAttempt(app_1, user_0);
    // Three mock 8GB nodes; note only numNodes = 2 of them are counted in the
    // cluster resource below, so the cluster totals 16GB.
    String host_0 = "host_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
    String host_1 = "host_1";
    FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB);
    String host_2 = "host_2";
    FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, DEFAULT_RACK, 0, 8 * GB);
    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
    when(csContext.getNode(node_0.getNodeID())).thenReturn(node_0);
    when(csContext.getNode(node_1.getNodeID())).thenReturn(node_1);
    when(csContext.getNode(node_2.getNodeID())).thenReturn(node_2);
    final int numNodes = 2;
    Resource clusterResource = Resources.createResource(numNodes * (8 * GB));
    when(csContext.getNumClusterNodes()).thenReturn(numNodes);
    root.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    // app_0 asks for: 1 x 2GB (AM), 2 x 3GB (map), 2 x 5GB (reduce).
    Priority priorityAM = TestUtils.createMockPriority(1);
    Priority priorityMap = TestUtils.createMockPriority(5);
    Priority priorityReduce = TestUtils.createMockPriority(10);
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory)));
    app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, priorityReduce, recordFactory)));
    // Allocation 1 (node_0): the 2GB AM container.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(2 * GB, a.getUsedResources().getMemorySize());
    assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(14 * GB, a.getMetrics().getAvailableMB());
    assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Allocation 2 (node_0): first 3GB map container; node_0 now at 5GB of 8GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(5 * GB, a.getUsedResources().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(11 * GB, a.getMetrics().getAvailableMB());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize());
    // Allocation 3 (node_1): second 3GB map container lands on node_1.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(8 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(0 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(8 * GB, a.getMetrics().getAvailableMB());
    assertEquals(null, node_0.getReservedContainer());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    // Allocation 4 (node_0): the 5GB reduce request cannot fit (node_0 has only
    // 3GB free), so it becomes a reservation; queue "used" includes it (13GB)
    // while actual consumption stays at 8GB.
    TestUtils.applyResourceCommitRequest(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), nodes, apps, csConf);
    assertEquals(13 * GB, a.getUsedResources().getMemorySize());
    assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize());
    assertEquals(5 * GB, app_0.getCurrentReservation().getMemorySize());
    assertEquals(5 * GB, a.getMetrics().getReservedMB());
    assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
    assertEquals(3 * GB, a.getMetrics().getAvailableMB());
    assertEquals(3 * GB, app_0.getHeadroom().getMemorySize());
    assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize());
    assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize());
    // Case (a): a 14GB user limit covers the full 13GB (8 allocated + 5
    // reserved), so nothing needs to be unreserved.
    Resource limit = Resources.createResource(14 * GB, 0);
    ResourceLimits userResourceLimits = new ResourceLimits(clusterResource);
    boolean res = a.canAssignToUser(clusterResource, user_0, limit, app_0, "", userResourceLimits);
    assertTrue(res);
    assertEquals(Resources.none(), userResourceLimits.getAmountNeededUnreserve());
    // Case (b): a 12GB limit only works if 1GB (and 4 vcores) of the
    // reservation is released; canAssignToUser still says yes but reports the
    // amount needed to unreserve.
    limit = Resources.createResource(12 * GB, 0);
    userResourceLimits = new ResourceLimits(clusterResource);
    res = a.canAssignToUser(clusterResource, user_0, limit, app_0, "", userResourceLimits);
    assertTrue(res);
    assertEquals(Resources.createResource(1 * GB, 4), userResourceLimits.getAmountNeededUnreserve());
    // Case (c): with reservations-continue-looking disabled, the same 12GB
    // limit is a hard refusal and no unreserve amount is suggested.
    refreshQueuesTurnOffReservationsContLook(a, csConf);
    userResourceLimits = new ResourceLimits(clusterResource);
    res = a.canAssignToUser(clusterResource, user_0, limit, app_0, "", userResourceLimits);
    assertFalse(res);
    assertEquals(Resources.none(), userResourceLimits.getAmountNeededUnreserve());
}
283646.711135hadoop
/**
 * End-to-end FIFO scheduler walk-through: two applications on two node
 * managers (4GB and 2GB). Tasks are submitted, allocated via NM heartbeats,
 * and finished one by one; after each step the per-application and per-node
 * resource usage is asserted. The sequence of schedule() calls and
 * heartbeats is order-sensitive — each heartbeat both launches previously
 * assigned containers and triggers new allocations.
 */
public void testFifoScheduler() throws Exception {
    LOG.info("--- START: testFifoScheduler ---");
    final int GB = 1024;
    NodeStatus mockNodeStatus = createMockNodeStatus();
    // nm_0: 4GB/1 vcore, nm_1: 2GB/1 vcore.
    String host_0 = "host_0";
    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(4 * GB, 1), mockNodeStatus);
    nm_0.heartbeat();
    String host_1 = "host_1";
    org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, Resources.createResource(2 * GB, 1), mockNodeStatus);
    nm_1.heartbeat();
    Priority priority_0 = Priority.newInstance(0);
    Priority priority_1 = Priority.newInstance(1);
    // application_0: 1GB containers at priority 1, 2GB at priority 0.
    Application application_0 = new Application("user_0", resourceManager);
    application_0.submit();
    application_0.addNodeManager(host_0, 1234, nm_0);
    application_0.addNodeManager(host_1, 1234, nm_1);
    Resource capability_0_0 = Resources.createResource(GB);
    application_0.addResourceRequestSpec(priority_1, capability_0_0);
    Resource capability_0_1 = Resources.createResource(2 * GB);
    application_0.addResourceRequestSpec(priority_0, capability_0_1);
    Task task_0_0 = new Task(application_0, priority_1, new String[] { host_0, host_1 });
    application_0.addTask(task_0_0);
    // application_1: 3GB containers at priority 1, 4GB at priority 0.
    Application application_1 = new Application("user_1", resourceManager);
    application_1.submit();
    application_1.addNodeManager(host_0, 1234, nm_0);
    application_1.addNodeManager(host_1, 1234, nm_1);
    Resource capability_1_0 = Resources.createResource(3 * GB);
    application_1.addResourceRequestSpec(priority_1, capability_1_0);
    Resource capability_1_1 = Resources.createResource(4 * GB);
    application_1.addResourceRequestSpec(priority_0, capability_1_1);
    Task task_1_0 = new Task(application_1, priority_1, new String[] { host_0, host_1 });
    application_1.addTask(task_1_0);
    LOG.info("Send resource requests to the scheduler");
    application_0.schedule();
    application_1.schedule();
    LOG.info("Send a heartbeat to kick the tires on the Scheduler... " + "nm0 -> task_0_0 and task_1_0 allocated, used=4G " + "nm1 -> nothing allocated");
    nm_0.heartbeat();
    nm_1.heartbeat();
    // task_0_0 (1GB) and task_1_0 (3GB) both land on nm_0, filling it.
    application_0.schedule();
    checkApplicationResourceUsage(GB, application_0);
    application_1.schedule();
    checkApplicationResourceUsage(3 * GB, application_1);
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkNodeResourceUsage(4 * GB, nm_0);
    checkNodeResourceUsage(0 * GB, nm_1);
    LOG.info("Adding new tasks...");
    // Queue up more work: app_1 gets two 3GB ANY tasks plus a 4GB priority-0
    // task; app_0 gets two 1GB host-local tasks plus a 2GB ANY task.
    Task task_1_1 = new Task(application_1, priority_1, new String[] { ResourceRequest.ANY });
    application_1.addTask(task_1_1);
    Task task_1_2 = new Task(application_1, priority_1, new String[] { ResourceRequest.ANY });
    application_1.addTask(task_1_2);
    Task task_1_3 = new Task(application_1, priority_0, new String[] { ResourceRequest.ANY });
    application_1.addTask(task_1_3);
    application_1.schedule();
    Task task_0_1 = new Task(application_0, priority_1, new String[] { host_0, host_1 });
    application_0.addTask(task_0_1);
    Task task_0_2 = new Task(application_0, priority_1, new String[] { host_0, host_1 });
    application_0.addTask(task_0_2);
    Task task_0_3 = new Task(application_0, priority_0, new String[] { ResourceRequest.ANY });
    application_0.addTask(task_0_3);
    application_0.schedule();
    LOG.info("Sending hb from " + nm_0.getHostName());
    nm_0.heartbeat();
    LOG.info("Sending hb from " + nm_1.getHostName());
    nm_1.heartbeat();
    LOG.info("Trying to allocate...");
    // nm_0 is full; only nm_1's 2GB is assignable, going to app_0 (FIFO).
    application_0.schedule();
    checkApplicationResourceUsage(3 * GB, application_0);
    application_1.schedule();
    checkApplicationResourceUsage(3 * GB, application_1);
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkNodeResourceUsage(4 * GB, nm_0);
    checkNodeResourceUsage(2 * GB, nm_1);
    LOG.info("Finishing up task_0_0");
    // Free 1GB on nm_0; app_0's next task takes it (usage stays 3GB net).
    application_0.finishTask(task_0_0);
    application_0.schedule();
    application_1.schedule();
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkApplicationResourceUsage(3 * GB, application_0);
    checkApplicationResourceUsage(3 * GB, application_1);
    checkNodeResourceUsage(4 * GB, nm_0);
    checkNodeResourceUsage(2 * GB, nm_1);
    LOG.info("Finishing up task_1_0");
    // Free 3GB on nm_0; app_0's 2GB priority-0 task fills in, app_1 drains.
    application_1.finishTask(task_1_0);
    application_0.schedule();
    application_1.schedule();
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkApplicationResourceUsage(4 * GB, application_0);
    checkApplicationResourceUsage(0 * GB, application_1);
    // NOTE(review): unlike the surrounding steps, nm_0's usage is not
    // re-checked here — presumably intentional, but worth confirming.
    checkNodeResourceUsage(2 * GB, nm_1);
    LOG.info("Finishing up task_0_3");
    application_0.finishTask(task_0_3);
    application_0.schedule();
    application_1.schedule();
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkApplicationResourceUsage(2 * GB, application_0);
    checkApplicationResourceUsage(0 * GB, application_1);
    checkNodeResourceUsage(0 * GB, nm_1);
    LOG.info("Finishing up task_0_1");
    application_0.finishTask(task_0_1);
    application_0.schedule();
    application_1.schedule();
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkApplicationResourceUsage(1 * GB, application_0);
    checkApplicationResourceUsage(0 * GB, application_1);
    LOG.info("Finishing up task_0_2");
    // With app_0 done, app_1's queued 4GB priority-0 task can finally run.
    application_0.finishTask(task_0_2);
    application_0.schedule();
    application_1.schedule();
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkApplicationResourceUsage(0 * GB, application_0);
    checkApplicationResourceUsage(4 * GB, application_1);
    LOG.info("Finishing up task_1_3");
    application_1.finishTask(task_1_3);
    application_0.schedule();
    application_1.schedule();
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkApplicationResourceUsage(0 * GB, application_0);
    checkApplicationResourceUsage(3 * GB, application_1);
    LOG.info("Finishing up task_1_1");
    application_1.finishTask(task_1_1);
    application_0.schedule();
    application_1.schedule();
    nm_0.heartbeat();
    nm_1.heartbeat();
    checkApplicationResourceUsage(0 * GB, application_0);
    checkApplicationResourceUsage(3 * GB, application_1);
    LOG.info("--- END: testFifoScheduler ---");
}
284033.475120hadoop
/**
 * Exercises the full container auto-update lifecycle on an RM configured to
 * push updates to the NM automatically: allocate an OPPORTUNISTIC container,
 * then PROMOTE it to GUARANTEED, INCREASE its resource, DECREASE it, and
 * finally DEMOTE it back to OPPORTUNISTIC. After each update the test checks
 * both the AM-facing AllocateResponse and the NM-facing heartbeat response
 * carry the updated container. Heartbeat/allocate ordering is significant:
 * some responses arrive asynchronously, hence the retry-once blocks.
 */
public void testContainerAutoUpdateContainer() throws Exception {
    // Replace the default RM with one that auto-pushes container updates.
    rm.stop();
    createAndStartRMWithAutoUpdateContainer();
    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
    nm1.registerNode();
    nm1.nodeHeartbeat(oppContainersStatus, true);
    OpportunisticContainerAllocatorAMService amservice = (OpportunisticContainerAllocatorAMService) rm.getApplicationMasterService();
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm).withAppName("app").withUser("user").withAcls(null).withQueue("default").withUnmanagedAM(false).build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
    ResourceScheduler scheduler = rm.getResourceScheduler();
    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    nm1.nodeHeartbeat(oppContainersStatus, true);
    // Wait (up to 1s) for the node to show up in the least-loaded list so
    // opportunistic allocation can target it.
    GenericTestUtils.waitFor(() -> amservice.getLeastLoadedNodes().size() == 1, 10, 10 * 100);
    // Ask for two 1GB OPPORTUNISTIC containers; the second may arrive on a
    // follow-up allocate call.
    AllocateResponse allocateResponse = am1.allocate(Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(1 * GB), 2, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true))), null);
    List<Container> allocatedContainers = allocateResponse.getAllocatedContainers();
    allocatedContainers.addAll(am1.allocate(null, null).getAllocatedContainers());
    Assert.assertEquals(2, allocatedContainers.size());
    Container container = allocatedContainers.get(0);
    // Report the container RUNNING so the RM transitions its RMContainer.
    nm1.nodeHeartbeat(Arrays.asList(ContainerStatus.newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)), true);
    rm.drainEvents();
    RMContainer rmContainer = ((CapacityScheduler) scheduler).getApplicationAttempt(container.getId().getApplicationAttemptId()).getRMContainer(container.getId());
    Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
    // --- Update 1: PROMOTE to GUARANTEED. ---
    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(0, container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED)));
    nm1.nodeHeartbeat(Arrays.asList(ContainerStatus.newInstance(container.getId(), ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)), true);
    rm.drainEvents();
    allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    UpdatedContainer uc = allocateResponse.getUpdatedContainers().get(0);
    Assert.assertEquals(container.getId(), uc.getContainer().getId());
    Assert.assertEquals(ExecutionType.GUARANTEED, uc.getContainer().getExecutionType());
    // The NM must also be told about the promotion via its heartbeat.
    NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
    Assert.assertEquals(1, response.getContainersToUpdate().size());
    Container containersFromNM = response.getContainersToUpdate().get(0);
    Assert.assertEquals(container.getId(), containersFromNM.getId());
    Assert.assertEquals(ExecutionType.GUARANTEED, containersFromNM.getExecutionType());
    // --- Update 2: INCREASE resource to 2GB / 1 vcore. ---
    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(1, container.getId(), ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(2 * GB, 1), null)));
    response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus.newInstance(container.getId(), ExecutionType.GUARANTEED, ContainerState.RUNNING, "", 0)), true);
    rm.drainEvents();
    // The update may not be in the first response; retry once after draining.
    if (allocateResponse.getUpdatedContainers().size() == 0) {
        allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    }
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    uc = allocateResponse.getUpdatedContainers().get(0);
    Assert.assertEquals(container.getId(), uc.getContainer().getId());
    Assert.assertEquals(Resource.newInstance(2 * GB, 1), uc.getContainer().getResource());
    rm.drainEvents();
    // Same retry-once pattern for the NM-side heartbeat response.
    if (response.getContainersToUpdate().size() == 0) {
        response = nm1.nodeHeartbeat(true);
    }
    Assert.assertEquals(1, response.getContainersToUpdate().size());
    Assert.assertEquals(Resource.newInstance(2 * GB, 1), response.getContainersToUpdate().get(0).getResource());
    // --- Update 3: DECREASE resource back to 1GB / 1 vcore. Decreases are
    // applied immediately, so no retry is needed on the AM side. ---
    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(2, container.getId(), ContainerUpdateType.DECREASE_RESOURCE, Resources.createResource(1 * GB, 1), null)));
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    rm.drainEvents();
    response = nm1.nodeHeartbeat(true);
    Assert.assertEquals(1, response.getContainersToUpdate().size());
    Assert.assertEquals(Resource.newInstance(1 * GB, 1), response.getContainersToUpdate().get(0).getResource());
    // --- Update 4: DEMOTE back to OPPORTUNISTIC. ---
    nm1.nodeHeartbeat(oppContainersStatus, true);
    allocateResponse = am1.sendContainerUpdateRequest(Arrays.asList(UpdateContainerRequest.newInstance(3, container.getId(), ContainerUpdateType.DEMOTE_EXECUTION_TYPE, null, ExecutionType.OPPORTUNISTIC)));
    response = nm1.nodeHeartbeat(Arrays.asList(ContainerStatus.newInstance(container.getId(), ExecutionType.GUARANTEED, ContainerState.RUNNING, "", 0)), true);
    rm.drainEvents();
    if (allocateResponse.getUpdatedContainers().size() == 0) {
        allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    }
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    uc = allocateResponse.getUpdatedContainers().get(0);
    Assert.assertEquals(ExecutionType.OPPORTUNISTIC, uc.getContainer().getExecutionType());
    if (response.getContainersToUpdate().size() == 0) {
        response = nm1.nodeHeartbeat(oppContainersStatus, true);
    }
    Assert.assertEquals(1, response.getContainersToUpdate().size());
    Assert.assertEquals(ExecutionType.OPPORTUNISTIC, response.getContainersToUpdate().get(0).getExecutionType());
}
283341.8913119hadoop
/**
 * Verifies that untracked inactive nodes are forgotten by the RM after the
 * configured untracked-removal timeout, for both the graceful
 * (DECOMMISSIONING -> DECOMMISSIONED) and forceful (SHUTDOWN) paths.
 *
 * Fixes over the previous version: uses JUnit assertions instead of the bare
 * {@code assert} keyword (which is skipped unless the JVM runs with -ea),
 * passes expected values before actual values in {@code assertEquals} so
 * failure messages read correctly, and uses {@code assertNull}/
 * {@code assertEquals(enum, ...)} for clearer diagnostics. Pass/fail
 * behavior is unchanged.
 *
 * @param doGraceful true to exercise graceful decommissioning, false for
 *                   forceful refresh.
 */
public void testNodeRemovalUtil(boolean doGraceful) throws Exception {
    Configuration conf = new Configuration();
    int timeoutValue = 500;
    // Start with empty include/exclude lists so all three nodes may register.
    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, "");
    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, "");
    conf.setInt(YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC, timeoutValue);
    // NOTE: this latch is never counted down; await(timeout) is used purely
    // as a bounded sleep while the background removal-check thread runs.
    CountDownLatch latch = new CountDownLatch(1);
    rm = new MockRM(conf);
    rm.init(conf);
    rm.start();
    RMContext rmContext = rm.getRMContext();
    refreshNodesOption(doGraceful, conf);
    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    MockNM nm2 = rm.registerNode("host2:5678", 10240);
    MockNM nm3 = rm.registerNode("localhost:4433", 1024);
    ClusterMetrics metrics = ClusterMetrics.getMetrics();
    // JUnit assertion instead of the 'assert' keyword, which is silently
    // skipped when the JVM runs without -ea.
    Assert.assertNotNull(metrics);
    NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
    nodeHeartbeat = nm2.nodeHeartbeat(true);
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
    nodeHeartbeat = nm3.nodeHeartbeat(true);
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
    rm.drainEvents();
    Assert.assertEquals("All 3 nodes should be active", 3, metrics.getNumActiveNMs());
    // Shrink the include list to host1 + localhost; host2 becomes untracked.
    String ip = NetUtils.normalizeHostName("localhost");
    writeToHostsFile("host1", ip);
    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
    refreshNodesOption(doGraceful, conf);
    if (doGraceful) {
        rm.waitForState(nm2.getNodeId(), NodeState.DECOMMISSIONING);
    }
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    rm.drainEvents();
    Assert.assertFalse("Node should not be in active node list", rmContext.getRMNodes().containsKey(nm2.getNodeId()));
    RMNode rmNode = rmContext.getInactiveRMNodes().get(nm2.getNodeId());
    Assert.assertEquals("Node should be in inactive node list", doGraceful ? NodeState.DECOMMISSIONED : NodeState.SHUTDOWN, rmNode.getState());
    Assert.assertEquals("Active nodes should be 2", 2, metrics.getNumActiveNMs());
    Assert.assertEquals("Shutdown nodes should be expected", doGraceful ? 0 : 1, metrics.getNumShutdownNMs());
    // Wait one removal-check interval plus the removal timeout (plus slack),
    // after which the untracked inactive node must have been purged.
    int nodeRemovalTimeout = conf.getInt(YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC, YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
    int nodeRemovalInterval = rmContext.getNodesListManager().getNodeRemovalCheckInterval();
    long maxThreadSleeptime = nodeRemovalInterval + nodeRemovalTimeout + 100;
    latch.await(maxThreadSleeptime, TimeUnit.MILLISECONDS);
    rmNode = rmContext.getInactiveRMNodes().get(nm2.getNodeId());
    Assert.assertNull("Node should have been forgotten!", rmNode);
    Assert.assertEquals("Shutdown nodes should be 0 now", 0, metrics.getNumShutdownNMs());
    // Re-track host2, re-register it, then untrack it again while it is
    // active: it should transition straight to DECOMMISSIONED/SHUTDOWN.
    writeToHostsFile("host1", ip, "host2");
    refreshNodesOption(doGraceful, conf);
    nm2 = rm.registerNode("host2:5678", 10240);
    rm.drainEvents();
    writeToHostsFile("host1", ip);
    refreshNodesOption(doGraceful, conf);
    rm.waitForState(nm2.getNodeId(), doGraceful ? NodeState.DECOMMISSIONING : NodeState.SHUTDOWN);
    nm2.nodeHeartbeat(true);
    rm.drainEvents();
    rmNode = rmContext.getInactiveRMNodes().get(nm2.getNodeId());
    Assert.assertEquals("Node should be shutdown", doGraceful ? NodeState.DECOMMISSIONED : NodeState.SHUTDOWN, rmNode.getState());
    Assert.assertEquals("Active nodes should be 2", 2, metrics.getNumActiveNMs());
    Assert.assertEquals("Shutdown nodes should be expected", doGraceful ? 0 : 1, metrics.getNumShutdownNMs());
    latch.await(maxThreadSleeptime - 2000, TimeUnit.MILLISECONDS);
    // Re-track and re-register host2 once more; it must come back NORMAL.
    writeToHostsFile("host1", ip, "host2");
    refreshNodesOption(doGraceful, conf);
    nm2 = rm.registerNode("host2:5678", 10240);
    nodeHeartbeat = nm2.nodeHeartbeat(true);
    rm.drainEvents();
    Assert.assertEquals(NodeAction.NORMAL, nodeHeartbeat.getNodeAction());
    Assert.assertEquals("Shutdown nodes should be 0 now", 0, metrics.getNumShutdownNMs());
    Assert.assertEquals("All 3 nodes should be active", 3, metrics.getNumActiveNMs());
    // Finally, explicitly exclude host2: it should decommission (and, for the
    // graceful path, may linger in DECOMMISSIONING) but must NOT be purged by
    // the untracked-removal thread, since excluded nodes are still tracked.
    writeToHostsFile("host1", "host2", ip);
    writeToHostsFile(excludeHostFile, "host2");
    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, excludeHostFile.getAbsolutePath());
    refreshNodesOption(doGraceful, conf);
    rm.drainEvents();
    rmNode = doGraceful ? rmContext.getRMNodes().get(nm2.getNodeId()) : rmContext.getInactiveRMNodes().get(nm2.getNodeId());
    Assert.assertTrue("Node should be DECOMMISSIONED or DECOMMISSIONING", (rmNode.getState() == NodeState.DECOMMISSIONED) || (rmNode.getState() == NodeState.DECOMMISSIONING));
    if (rmNode.getState() == NodeState.DECOMMISSIONED) {
        Assert.assertEquals("Decommissioned/ing nodes should be 1 now", 1, metrics.getNumDecommisionedNMs());
    }
    latch.await(maxThreadSleeptime, TimeUnit.MILLISECONDS);
    rmNode = doGraceful ? rmContext.getRMNodes().get(nm2.getNodeId()) : rmContext.getInactiveRMNodes().get(nm2.getNodeId());
    Assert.assertTrue("Node should be DECOMMISSIONED or DECOMMISSIONING", (rmNode.getState() == NodeState.DECOMMISSIONED) || (rmNode.getState() == NodeState.DECOMMISSIONING));
    if (rmNode.getState() == NodeState.DECOMMISSIONED) {
        Assert.assertEquals("Decommissioned/ing nodes should be 1 now", 1, metrics.getNumDecommisionedNMs());
    }
    testNodeRemovalUtilDecomToUntracked(rmContext, conf, nm1, nm2, nm3, doGraceful);
    rm.stop();
}
283036.4225100hadoop
/**
 * Queries the timeline REST endpoint for all apps of user1's "flow_name"
 * flow and checks the returned configs and metrics, first with
 * {@code fields=ALL} (single-value metrics), then with
 * {@code fields=ALL&metricslimit=6} (time-series metrics), and finally with
 * the default-cluster URL both unrestricted and with {@code limit=1}.
 */
public void testGetFlowApps() throws Exception {
    Client client = createClient();
    try {
        String base = "http://localhost:" + getServerPort() + "/ws/v2/";
        // Pass 1: fields=ALL -> configs plus latest single metric values.
        URI uri = URI.create(base + "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" + "fields=ALL");
        ClientResponse resp = getResponse(client, uri);
        Set<TimelineEntity> entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(3, entities.size());
        for (TimelineEntity entity : entities) {
            // Each returned app must be one of the three known apps carrying
            // exactly its expected config map.
            boolean knownApp = (entity.getId().equals("application_1111111111_1111") && entity.getConfigs().size() == 1 && entity.getConfigs().equals(ImmutableMap.of("cfg2", "value1"))) || (entity.getId().equals("application_1111111111_2222") && entity.getConfigs().size() == 1 && entity.getConfigs().equals(ImmutableMap.of("cfg1", "value1"))) || (entity.getId().equals("application_1111111111_2224") && entity.getConfigs().size() == 0);
            assertTrue("Unexpected app in result", knownApp);
            for (TimelineMetric metric : entity.getMetrics()) {
                switch (entity.getId()) {
                    case "application_1111111111_1111": {
                        TimelineMetric m1 = newMetric(TimelineMetric.Type.SINGLE_VALUE, "HDFS_BYTES_READ", ts - 80000, 57L);
                        TimelineMetric m2 = newMetric(TimelineMetric.Type.SINGLE_VALUE, "MAP_SLOT_MILLIS", ts - 80000, 40L);
                        TimelineMetric m3 = newMetric(TimelineMetric.Type.SINGLE_VALUE, "MAP1_SLOT_MILLIS", ts - 80000, 40L);
                        assertTrue(verifyMetrics(metric, m1, m2, m3));
                        break;
                    }
                    case "application_1111111111_2222": {
                        TimelineMetric m1 = newMetric(TimelineMetric.Type.SINGLE_VALUE, "MAP_SLOT_MILLIS", ts - 80000, 101L);
                        assertTrue(verifyMetrics(metric, m1));
                        break;
                    }
                    case "application_1111111111_2224": {
                        TimelineMetric m1 = newMetric(TimelineMetric.Type.SINGLE_VALUE, "MAP_SLOT_MILLIS", ts - 80000, 101L);
                        assertTrue(verifyMetrics(metric, m1));
                        break;
                    }
                    default:
                        break;
                }
            }
        }
        // Pass 2: metricslimit=6 -> metrics come back as time series.
        uri = URI.create(base + "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" + "fields=ALL&metricslimit=6");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(3, entities.size());
        for (TimelineEntity entity : entities) {
            boolean knownApp = (entity.getId().equals("application_1111111111_1111") && entity.getConfigs().size() == 1 && entity.getConfigs().equals(ImmutableMap.of("cfg2", "value1"))) || (entity.getId().equals("application_1111111111_2222") && entity.getConfigs().size() == 1 && entity.getConfigs().equals(ImmutableMap.of("cfg1", "value1"))) || (entity.getId().equals("application_1111111111_2224") && entity.getConfigs().size() == 0);
            assertTrue("Unexpected app in result", knownApp);
            for (TimelineMetric metric : entity.getMetrics()) {
                switch (entity.getId()) {
                    case "application_1111111111_1111": {
                        TimelineMetric m1 = newMetric(TimelineMetric.Type.TIME_SERIES, "HDFS_BYTES_READ", ts - 80000, 57L);
                        m1.addValue(ts - 100000, 31L);
                        TimelineMetric m2 = newMetric(TimelineMetric.Type.TIME_SERIES, "MAP_SLOT_MILLIS", ts - 80000, 40L);
                        m2.addValue(ts - 100000, 2L);
                        TimelineMetric m3 = newMetric(TimelineMetric.Type.TIME_SERIES, "MAP1_SLOT_MILLIS", ts - 80000, 40L);
                        m3.addValue(ts - 100000, 2L);
                        assertTrue(verifyMetrics(metric, m1, m2, m3));
                        break;
                    }
                    case "application_1111111111_2222": {
                        TimelineMetric m1 = newMetric(TimelineMetric.Type.TIME_SERIES, "MAP_SLOT_MILLIS", ts - 80000, 101L);
                        m1.addValue(ts - 100000, 5L);
                        assertTrue(verifyMetrics(metric, m1));
                        break;
                    }
                    case "application_1111111111_2224": {
                        TimelineMetric m1 = newMetric(TimelineMetric.Type.TIME_SERIES, "MAP_SLOT_MILLIS", ts - 80000, 101L);
                        m1.addValue(ts - 100000, 5L);
                        assertTrue(verifyMetrics(metric, m1));
                        break;
                    }
                    default:
                        break;
                }
            }
        }
        // Pass 3: default-cluster URL (no explicit cluster) returns all 3.
        uri = URI.create(base + "timeline/users/user1/flows/flow_name/apps");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(3, entities.size());
        // Pass 4: limit=1 caps the result set at a single app.
        uri = URI.create(base + "timeline/users/user1/flows/flow_name/apps?limit=1");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(1, entities.size());
    } finally {
        client.destroy();
    }
}
282984.5212120hadoop
/**
 * Verifies filtering of YARN application entities by event existence:
 * EQUAL / NOT_EQUAL existence filters combined with AND semantics, and
 * filter lists combined with OR semantics. Also checks that event details
 * are only populated on the returned entities when Field.ALL is requested.
 */
public void testReadAppsEventFilters() throws Exception {
    // Apps that fired "update_event" but never "end_event"; request all
    // fields so matching events come back with the entity.
    TimelineFilterList updateWithoutEnd = new TimelineFilterList();
    updateWithoutEnd.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    updateWithoutEnd.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    Set<TimelineEntity> entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(updateWithoutEnd).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(1, entities.size());
    int eventCount = 0;
    for (TimelineEntity entity : entities) {
        eventCount += entity.getEvents().size();
        if (!"application_1111111111_4444".equals(entity.getId())) {
            Assert.fail("Entity id should have been application_1111111111_4444");
        }
    }
    assertEquals(1, eventCount);
    // Same filter, but with the default (minimal) data-to-retrieve: the
    // entity still matches while its events are not populated.
    TimelineFilterList updateWithoutEndMinimal = new TimelineFilterList();
    updateWithoutEndMinimal.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    updateWithoutEndMinimal.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(updateWithoutEndMinimal).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    eventCount = 0;
    for (TimelineEntity entity : entities) {
        eventCount += entity.getEvents().size();
        if (!"application_1111111111_4444".equals(entity.getId())) {
            Assert.fail("Entity id should have been application_1111111111_4444");
        }
    }
    assertEquals(0, eventCount);
    // Apps that never fired "end_event": two entities match.
    TimelineFilterList withoutEnd = new TimelineFilterList();
    withoutEnd.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(withoutEnd).build(), new TimelineDataToRetrieve());
    assertEquals(2, entities.size());
    eventCount = 0;
    for (TimelineEntity entity : entities) {
        eventCount += entity.getEvents().size();
        if (!"application_1111111111_2222".equals(entity.getId()) && !"application_1111111111_4444".equals(entity.getId())) {
            Assert.fail("Entity ids' should have been application_1111111111_2222 and application_1111111111_4444");
        }
    }
    assertEquals(0, eventCount);
    // No app fired both "update_event" and "dummy_event".
    TimelineFilterList updateAndDummy = new TimelineFilterList();
    updateAndDummy.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    updateAndDummy.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "dummy_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(updateAndDummy).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // OR of (update_event AND dummy_event) with (start_event): only the
    // "start_event" branch matches anything.
    TimelineFilterList andBranch = new TimelineFilterList();
    andBranch.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    andBranch.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "dummy_event"));
    TimelineFilterList startBranch = new TimelineFilterList();
    startBranch.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "start_event"));
    TimelineFilterList orFilters = new TimelineFilterList(Operator.OR, andBranch, startBranch);
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(orFilters).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    eventCount = 0;
    for (TimelineEntity entity : entities) {
        eventCount += entity.getEvents().size();
        if (!"application_1111111111_2222".equals(entity.getId())) {
            Assert.fail("Entity id should have been application_1111111111_2222");
        }
    }
    assertEquals(0, eventCount);
    // Apps that fired neither "update_event" nor "end_event".
    TimelineFilterList neitherUpdateNorEnd = new TimelineFilterList();
    neitherUpdateNorEnd.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "update_event"));
    neitherUpdateNorEnd.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().eventFilters(neitherUpdateNorEnd).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    eventCount = 0;
    for (TimelineEntity entity : entities) {
        eventCount += entity.getEvents().size();
        if (!"application_1111111111_2222".equals(entity.getId())) {
            Assert.fail("Entity id should have been application_1111111111_2222");
        }
    }
    assertEquals(0, eventCount);
}
282578.8214123kafka
/**
 * Emits the {@code read(Readable, short)} override for the generated message
 * class {@code className}: mandatory (untagged) fields are read in declaration
 * order — or reset to their defaults when absent from the wire version —
 * followed by the tagged-field section, which only exists in flexible versions.
 *
 * NOTE(review): every printf below writes Java source into {@code buffer};
 * the statement order and the increment/decrementIndent bookkeeping directly
 * shape the generated code and must not be rearranged.
 */
private void generateClassReader(String className, StructSpec struct, Versions parentVersions) {
    headerGenerator.addImport(MessageGenerator.READABLE_CLASS);
    buffer.printf("@Override%n");
    buffer.printf("public final void read(Readable _readable, short _version) {%n");
    buffer.incrementIndent();
    // If this struct does not exist in the requested version at all, the
    // generated method throws UnsupportedVersionException up front.
    VersionConditional.forVersions(parentVersions, struct.versions()).allowMembershipCheckAlwaysFalse(false).ifNotMember(__ -> {
        headerGenerator.addImport(MessageGenerator.UNSUPPORTED_VERSION_EXCEPTION_CLASS);
        buffer.printf("throw new UnsupportedVersionException(\"Can't read " + "version \" + _version + \" of %s\");%n", className);
    }).generate(buffer);
    // Versions in which this struct can actually be read.
    Versions curVersions = parentVersions.intersect(struct.versions());
    for (FieldSpec field : struct.fields()) {
        Versions fieldFlexibleVersions = fieldFlexibleVersions(field);
        // Tagged fields are only representable on the wire in flexible
        // versions, so a tagged range outside the flexible range is a spec bug.
        if (!field.taggedVersions().intersect(fieldFlexibleVersions).equals(field.taggedVersions())) {
            throw new RuntimeException("Field " + field.name() + " specifies tagged " + "versions " + field.taggedVersions() + " that are not a subset of the " + "flexible versions " + fieldFlexibleVersions);
        }
        // Versions in which the field is serialized inline (not as a tag).
        Versions mandatoryVersions = field.versions().subtract(field.taggedVersions());
        VersionConditional.forVersions(mandatoryVersions, curVersions).alwaysEmitBlockScope(field.type().isVariableLength()).ifNotMember(__ -> {
            // Field absent in this version: generated code resets it to default.
            buffer.printf("this.%s = %s;%n", field.camelCaseName(), field.fieldDefault(headerGenerator, structRegistry));
        }).ifMember(presentAndUntaggedVersions -> {
            if (field.type().isVariableLength() && !field.type().isStruct()) {
                ClauseGenerator callGenerateVariableLengthReader = versions -> {
                    generateVariableLengthReader(fieldFlexibleVersions(field), field.camelCaseName(), field.type(), versions, field.nullableVersions(), String.format("this.%s = ", field.camelCaseName()), String.format(";%n"), structRegistry.isStructArrayWithKeys(field), field.zeroCopy());
                };
                // Arrays whose element encoding differs between flexible and
                // non-flexible versions need both branches emitted separately.
                if (field.type().isArray() && ((FieldType.ArrayType) field.type()).elementType().serializationIsDifferentInFlexibleVersions()) {
                    VersionConditional.forVersions(fieldFlexibleVersions(field), presentAndUntaggedVersions).ifMember(callGenerateVariableLengthReader).ifNotMember(callGenerateVariableLengthReader).generate(buffer);
                } else {
                    callGenerateVariableLengthReader.generate(presentAndUntaggedVersions);
                }
            } else if (field.type().isStruct()) {
                generateStructReader(field, presentAndUntaggedVersions, false);
            } else {
                // Fixed-length primitive: a single direct read expression.
                buffer.printf("this.%s = %s;%n", field.camelCaseName(), primitiveReadExpression(field.type()));
            }
        }).generate(buffer);
    }
    // Tagged-field trailer, emitted only for flexible versions: read the tag
    // count, then dispatch each (tag, size) pair through a switch.
    buffer.printf("this._unknownTaggedFields = null;%n");
    VersionConditional.forVersions(messageFlexibleVersions, curVersions).ifMember(curFlexibleVersions -> {
        buffer.printf("int _numTaggedFields = _readable.readUnsignedVarint();%n");
        buffer.printf("for (int _i = 0; _i < _numTaggedFields; _i++) {%n");
        buffer.incrementIndent();
        buffer.printf("int _tag = _readable.readUnsignedVarint();%n");
        buffer.printf("int _size = _readable.readUnsignedVarint();%n");
        buffer.printf("switch (_tag) {%n");
        buffer.incrementIndent();
        for (FieldSpec field : struct.fields()) {
            Versions validTaggedVersions = field.versions().intersect(field.taggedVersions());
            if (!validTaggedVersions.empty()) {
                if (!field.tag().isPresent()) {
                    throw new RuntimeException("Field " + field.name() + " has tagged versions, but no tag.");
                }
                buffer.printf("case %d: {%n", field.tag().get());
                buffer.incrementIndent();
                VersionConditional.forVersions(validTaggedVersions, curFlexibleVersions).ifMember(presentAndTaggedVersions -> {
                    if (field.type().isVariableLength() && !field.type().isStruct()) {
                        generateVariableLengthReader(fieldFlexibleVersions(field), field.camelCaseName(), field.type(), presentAndTaggedVersions, field.nullableVersions(), String.format("this.%s = ", field.camelCaseName()), String.format(";%n"), structRegistry.isStructArrayWithKeys(field), field.zeroCopy());
                    } else if (field.type().isStruct()) {
                        generateStructReader(field, presentAndTaggedVersions, true);
                    } else {
                        buffer.printf("this.%s = %s;%n", field.camelCaseName(), primitiveReadExpression(field.type()));
                    }
                    buffer.printf("break;%n");
                }).ifNotMember(__ -> {
                    // A known tag arriving in a version that doesn't define it
                    // is a protocol error in the generated reader.
                    buffer.printf("throw new RuntimeException(\"Tag %d is not " + "valid for version \" + _version);%n", field.tag().get());
                }).generate(buffer);
                buffer.decrementIndent();
                buffer.printf("}%n");
            }
        }
        // Unknown tags are preserved (not rejected) for forward compatibility.
        buffer.printf("default:%n");
        buffer.incrementIndent();
        buffer.printf("this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);%n");
        buffer.printf("break;%n");
        buffer.decrementIndent();
        buffer.decrementIndent();
        buffer.printf("}%n");
        buffer.decrementIndent();
        buffer.printf("}%n");
    }).generate(buffer);
    buffer.decrementIndent();
    buffer.printf("}%n");
}
283531.753981kafka
/**
 * Exercises key-range queries over the five stored records (values 1, 5, 9,
 * 3, 7) in both ascending and descending order, with bounded, half-open and
 * unbounded ranges, against a timestamped or plain store.
 *
 * @param isTimestamped whether the store under test carries real timestamps
 *                      (expected timestamp {@code WINDOW_START + v * 2min})
 *                      or the no-timestamp sentinel {@code -1}
 */
private void shouldHandleTimestampedRangeQueries(final boolean isTimestamped) {
    // Ascending iteration.
    shouldHandleTimestampedRangeQuery(Optional.of(0), Optional.of(4), true, Arrays.asList(expected(isTimestamped, 1), expected(isTimestamped, 5), expected(isTimestamped, 9), expected(isTimestamped, 3), expected(isTimestamped, 7)));
    shouldHandleTimestampedRangeQuery(Optional.of(1), Optional.of(3), true, Arrays.asList(expected(isTimestamped, 5), expected(isTimestamped, 3), expected(isTimestamped, 7)));
    shouldHandleTimestampedRangeQuery(Optional.of(3), Optional.empty(), true, Arrays.asList(expected(isTimestamped, 9), expected(isTimestamped, 7)));
    shouldHandleTimestampedRangeQuery(Optional.empty(), Optional.of(3), true, Arrays.asList(expected(isTimestamped, 1), expected(isTimestamped, 5), expected(isTimestamped, 3), expected(isTimestamped, 7)));
    shouldHandleTimestampedRangeQuery(Optional.empty(), Optional.empty(), true, Arrays.asList(expected(isTimestamped, 1), expected(isTimestamped, 5), expected(isTimestamped, 9), expected(isTimestamped, 3), expected(isTimestamped, 7)));
    // Descending iteration.
    shouldHandleTimestampedRangeQuery(Optional.of(0), Optional.of(4), false, Arrays.asList(expected(isTimestamped, 9), expected(isTimestamped, 5), expected(isTimestamped, 1), expected(isTimestamped, 7), expected(isTimestamped, 3)));
    shouldHandleTimestampedRangeQuery(Optional.of(1), Optional.of(3), false, Arrays.asList(expected(isTimestamped, 5), expected(isTimestamped, 7), expected(isTimestamped, 3)));
    shouldHandleTimestampedRangeQuery(Optional.of(3), Optional.empty(), false, Arrays.asList(expected(isTimestamped, 9), expected(isTimestamped, 7)));
    shouldHandleTimestampedRangeQuery(Optional.empty(), Optional.of(3), false, Arrays.asList(expected(isTimestamped, 5), expected(isTimestamped, 1), expected(isTimestamped, 7), expected(isTimestamped, 3)));
    shouldHandleTimestampedRangeQuery(Optional.empty(), Optional.empty(), false, Arrays.asList(expected(isTimestamped, 9), expected(isTimestamped, 5), expected(isTimestamped, 1), expected(isTimestamped, 7), expected(isTimestamped, 3)));
}

/**
 * Builds one expected query result: stored value {@code value} with timestamp
 * {@code WINDOW_START + value * 2min} for a timestamped store, or {@code -1}
 * for a plain store. Replaces ~40 copies of the same ternary expression.
 */
private ValueAndTimestamp<Integer> expected(final boolean isTimestamped, final int value) {
    final long timestamp = isTimestamped ? WINDOW_START + Duration.ofMinutes(2).toMillis() * value : -1L;
    return ValueAndTimestamp.make(value, timestamp);
}
284514.951129kafka
// Expected stored byte values, defined once instead of repeated inline at
// every assertion site.
// NOTE(review): the eight leading -1 bytes presumably encode an absent
// timestamp for entries written by the plain store, while "timestamp+..."
// entries were written by the timestamped store — confirm against the
// store's serialization format.
private static final byte[] KEY1_VALUE = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, '1' };
private static final byte[] KEY11_VALUE = new byte[] { 't', 'i', 'm', 'e', 's', 't', 'a', 'm', 'p', '+', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1' };
private static final byte[] KEY2_VALUE = new byte[] { 't', 'i', 'm', 'e', 's', 't', 'a', 'm', 'p', '+', '2', '2' };
private static final byte[] KEY4_VALUE = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, '4', '4', '4', '4' };
private static final byte[] KEY5_VALUE = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, '5', '5', '5', '5', '5' };
private static final byte[] KEY7_VALUE = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, '7', '7', '7', '7', '7', '7', '7' };
private static final byte[] KEY8_VALUE = new byte[] { 't', 'i', 'm', 'e', 's', 't', 'a', 'm', 'p', '+', '8', '8', '8', '8', '8', '8', '8', '8' };

/** Asserts that the iterator's next entry has exactly the given key and value. */
private static void assertNextEntry(final KeyValueIterator<Bytes, byte[]> iterator, final String key, final byte[] expectedValue) {
    final KeyValue<Bytes, byte[]> keyValue = iterator.next();
    assertArrayEquals(key.getBytes(), keyValue.key.get());
    assertArrayEquals(expectedValue, keyValue.value);
}

/**
 * Verifies that forward, reverse, range and prefix iteration over the store
 * does not migrate (rewrite) the stored bytes: every iterator observes the
 * originally stored serialized values, in the expected key order, and is
 * exhausted afterwards.
 */
private void iteratorsShouldNotMigrateData() {
    try (final KeyValueIterator<Bytes, byte[]> all = rocksDBStore.all()) {
        assertNextEntry(all, "key1", KEY1_VALUE);
        assertNextEntry(all, "key11", KEY11_VALUE);
        assertNextEntry(all, "key2", KEY2_VALUE);
        assertNextEntry(all, "key4", KEY4_VALUE);
        assertNextEntry(all, "key5", KEY5_VALUE);
        assertNextEntry(all, "key7", KEY7_VALUE);
        assertNextEntry(all, "key8", KEY8_VALUE);
        assertFalse(all.hasNext());
    }
    try (final KeyValueIterator<Bytes, byte[]> range = rocksDBStore.range(new Bytes("key2".getBytes()), new Bytes("key5".getBytes()))) {
        assertNextEntry(range, "key2", KEY2_VALUE);
        assertNextEntry(range, "key4", KEY4_VALUE);
        assertNextEntry(range, "key5", KEY5_VALUE);
        assertFalse(range.hasNext());
    }
    try (final KeyValueIterator<Bytes, byte[]> reverseAll = rocksDBStore.reverseAll()) {
        assertNextEntry(reverseAll, "key8", KEY8_VALUE);
        assertNextEntry(reverseAll, "key7", KEY7_VALUE);
        assertNextEntry(reverseAll, "key5", KEY5_VALUE);
        assertNextEntry(reverseAll, "key4", KEY4_VALUE);
        assertNextEntry(reverseAll, "key2", KEY2_VALUE);
        assertNextEntry(reverseAll, "key11", KEY11_VALUE);
        assertNextEntry(reverseAll, "key1", KEY1_VALUE);
        assertFalse(reverseAll.hasNext());
    }
    try (final KeyValueIterator<Bytes, byte[]> reverseRange = rocksDBStore.reverseRange(new Bytes("key2".getBytes()), new Bytes("key5".getBytes()))) {
        assertNextEntry(reverseRange, "key5", KEY5_VALUE);
        assertNextEntry(reverseRange, "key4", KEY4_VALUE);
        assertNextEntry(reverseRange, "key2", KEY2_VALUE);
        assertFalse(reverseRange.hasNext());
    }
    try (final KeyValueIterator<Bytes, byte[]> prefix = rocksDBStore.prefixScan("key1", stringSerializer)) {
        assertNextEntry(prefix, "key1", KEY1_VALUE);
        assertNextEntry(prefix, "key11", KEY11_VALUE);
        assertFalse(prefix.hasNext());
    }
}
283364.8422105wildfly
/**
 * Builds an IronJacamar runtime resource describing the given connection
 * definition — JNDI/class attributes, config properties, pool (including XA
 * and capacity plugins), security, timeout, validation and recovery — and
 * registers it (plus a "local" statistics child) under {@code parent}.
 *
 * @param parent  resource that will own the new connection-definition child
 * @param connDef parsed connection definition to mirror into the model
 */
private void addConnectionDefinition(final Resource parent, ConnectionDefinition connDef) {
    final Resource connDefResource = new IronJacamarResource.IronJacamarRuntimeResource();
    final ModelNode model = connDefResource.getModel();
    // Config properties are registered as child resources, not model attributes.
    if (connDef.getConfigProperties() != null) {
        for (Map.Entry<String, String> config : connDef.getConfigProperties().entrySet()) {
            addConfigProperties(connDefResource, config.getKey(), config.getValue());
        }
    }
    setAttribute(model, CLASS_NAME, connDef.getClassName());
    // Fix: the original set JNDI_NAME twice (before the config-property loop
    // and again here) with the same value; one assignment suffices.
    setAttribute(model, JNDI_NAME, connDef.getJndiName());
    setAttribute(model, USE_JAVA_CONTEXT, connDef.isUseJavaContext());
    setAttribute(model, ENABLED, connDef.isEnabled());
    setAttribute(model, CONNECTABLE, connDef.isConnectable());
    if (connDef.isTracking() != null) {
        setAttribute(model, TRACKING, connDef.isTracking());
    }
    setAttribute(model, USE_CCM, connDef.isUseCcm());
    setAttribute(model, SHARABLE, connDef.isSharable());
    setAttribute(model, ENLISTMENT, connDef.isEnlistment());
    final Pool pool = connDef.getPool();
    if (pool != null) {
        setAttribute(model, MAX_POOL_SIZE, pool.getMaxPoolSize());
        setAttribute(model, MIN_POOL_SIZE, pool.getMinPoolSize());
        setAttribute(model, INITIAL_POOL_SIZE, pool.getInitialPoolSize());
        if (pool.getCapacity() != null) {
            // Incrementer/decrementer plugins each carry a class name plus an
            // optional property map written directly into the model.
            if (pool.getCapacity().getIncrementer() != null) {
                setAttribute(model, CAPACITY_INCREMENTER_CLASS, pool.getCapacity().getIncrementer().getClassName());
                if (pool.getCapacity().getIncrementer().getConfigPropertiesMap() != null) {
                    for (Map.Entry<String, String> config : pool.getCapacity().getIncrementer().getConfigPropertiesMap().entrySet()) {
                        model.get(CAPACITY_INCREMENTER_PROPERTIES.getName(), config.getKey()).set(config.getValue());
                    }
                }
            }
            if (pool.getCapacity().getDecrementer() != null) {
                setAttribute(model, CAPACITY_DECREMENTER_CLASS, pool.getCapacity().getDecrementer().getClassName());
                if (pool.getCapacity().getDecrementer().getConfigPropertiesMap() != null) {
                    for (Map.Entry<String, String> config : pool.getCapacity().getDecrementer().getConfigPropertiesMap().entrySet()) {
                        model.get(CAPACITY_DECREMENTER_PROPERTIES.getName(), config.getKey()).set(config.getValue());
                    }
                }
            }
        }
        setAttribute(model, POOL_USE_STRICT_MIN, pool.isUseStrictMin());
        if (pool.getFlushStrategy() != null) {
            setAttribute(model, POOL_FLUSH_STRATEGY, pool.getFlushStrategy().name());
        }
        setAttribute(model, POOL_PREFILL, pool.isPrefill());
        setAttribute(model, POOL_FAIR, pool.isFair());
        if (connDef.isXa()) {
            // Reuse the 'pool' local rather than re-fetching connDef.getPool().
            assert pool instanceof XaPool;
            XaPool xaPool = (XaPool) pool;
            setAttribute(model, WRAP_XA_RESOURCE, xaPool.isWrapXaResource());
            setAttribute(model, SAME_RM_OVERRIDE, xaPool.isSameRmOverride());
            setAttribute(model, PAD_XID, xaPool.isPadXid());
            setAttribute(model, INTERLEAVING, xaPool.isInterleaving());
            setAttribute(model, NOTXSEPARATEPOOL, xaPool.isNoTxSeparatePool());
        }
    }
    final Security security = connDef.getSecurity();
    if (security != null) {
        setAttribute(model, APPLICATION, security.isApplication());
        // Elytron is unconditionally marked enabled for this runtime resource.
        setAttribute(model, ELYTRON_ENABLED, true);
        setAttribute(model, AUTHENTICATION_CONTEXT, security.getSecurityDomain());
        setAttribute(model, AUTHENTICATION_CONTEXT_AND_APPLICATION, security.getSecurityDomainAndApplication());
    }
    final TimeOut timeOut = connDef.getTimeOut();
    if (timeOut != null) {
        setAttribute(model, ALLOCATION_RETRY, timeOut.getAllocationRetry());
        setAttribute(model, ALLOCATION_RETRY_WAIT_MILLIS, timeOut.getAllocationRetryWaitMillis());
        setAttribute(model, BLOCKING_TIMEOUT_WAIT_MILLIS, timeOut.getBlockingTimeoutMillis());
        setAttribute(model, IDLETIMEOUTMINUTES, timeOut.getIdleTimeoutMinutes());
        setAttribute(model, XA_RESOURCE_TIMEOUT, timeOut.getXaResourceTimeout());
    }
    final Validation validation = connDef.getValidation();
    if (validation != null) {
        setAttribute(model, BACKGROUNDVALIDATIONMILLIS, validation.getBackgroundValidationMillis());
        setAttribute(model, BACKGROUNDVALIDATION, validation.isBackgroundValidation());
        setAttribute(model, USE_FAST_FAIL, validation.isUseFastFail());
        setAttribute(model, VALIDATE_ON_MATCH, validation.isValidateOnMatch());
    }
    final Recovery recovery = connDef.getRecovery();
    if (recovery != null) {
        setAttribute(model, NO_RECOVERY, recovery.getNoRecovery());
        final Extension recoverPlugin = recovery.getRecoverPlugin();
        if (recoverPlugin != null) {
            setAttribute(model, RECOVER_PLUGIN_CLASSNAME, recoverPlugin.getClassName());
            if (recoverPlugin.getConfigPropertiesMap() != null) {
                for (Map.Entry<String, String> config : recoverPlugin.getConfigPropertiesMap().entrySet()) {
                    model.get(RECOVER_PLUGIN_PROPERTIES.getName(), config.getKey()).set(config.getValue());
                }
            }
        }
        final Credential recoveryCredential = recovery.getCredential();
        if (recoveryCredential != null) {
            setAttribute(model, RECOVERY_PASSWORD, recoveryCredential.getPassword());
            setAttribute(model, RECOVERY_ELYTRON_ENABLED, true);
            setAttribute(model, RECOVERY_AUTHENTICATION_CONTEXT, recoveryCredential.getSecurityDomain());
            setAttribute(model, RECOVERY_USERNAME, recoveryCredential.getUserName());
        }
    }
    // Attach the statistics child, then register the definition under its
    // JNDI-name path element.
    final Resource statsResource = new IronJacamarResource.IronJacamarRuntimeResource();
    connDefResource.registerChild(PathElement.pathElement(Constants.STATISTICS_NAME, "local"), statsResource);
    final PathElement element = PathElement.pathElement(Constants.CONNECTIONDEFINITIONS_NAME, connDef.getJndiName());
    parent.registerChild(element, connDefResource);
}
282374.2129103wildfly
/**
 * Processes the {@code <interceptors/>} section of ejb-jar.xml and registers every
 * declared method-level override (around-invoke, around-timeout and the four lifecycle
 * callbacks) plus any deployment-descriptor environment on the EE module description.
 *
 * The original implementation repeated the same build-and-register loop six times;
 * the common logic now lives in {@link #registerInterceptorOverride} and
 * {@link #registerLifecycleOverrides}, with behavior unchanged.
 *
 * @param phaseContext the current deployment phase context
 * @throws DeploymentUnitProcessingException on deployment processing failure
 */
public void deploy(final DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    final EjbJarMetaData metaData = deploymentUnit.getAttachment(EjbDeploymentAttachmentKeys.EJB_JAR_METADATA);
    final EEModuleDescription eeModuleDescription = deploymentUnit.getAttachment(Attachments.EE_MODULE_DESCRIPTION);
    // Nothing to do without an ejb-jar.xml or without an <interceptors/> section in it.
    if (metaData == null) {
        return;
    }
    if (metaData.getInterceptors() == null) {
        return;
    }
    for (InterceptorMetaData interceptor : metaData.getInterceptors()) {
        String interceptorClassName = interceptor.getInterceptorClass();
        // Around-invoke / around-timeout methods have the shape Object m(InvocationContext).
        AroundInvokesMetaData aroundInvokes = interceptor.getAroundInvokes();
        if (aroundInvokes != null) {
            for (AroundInvokeMetaData aroundInvoke : aroundInvokes) {
                registerInterceptorOverride(eeModuleDescription, interceptorClassName, aroundInvoke.getClassName(), aroundInvoke.getMethodName(), Object.class, InterceptorClassDescription.Builder::setAroundInvoke);
            }
        }
        AroundTimeoutsMetaData aroundTimeouts = interceptor.getAroundTimeouts();
        if (aroundTimeouts != null) {
            for (AroundTimeoutMetaData aroundTimeout : aroundTimeouts) {
                registerInterceptorOverride(eeModuleDescription, interceptorClassName, aroundTimeout.getClassName(), aroundTimeout.getMethodName(), Object.class, InterceptorClassDescription.Builder::setAroundTimeout);
            }
        }
        // Lifecycle callback methods have the shape void m(InvocationContext).
        registerLifecycleOverrides(eeModuleDescription, interceptorClassName, interceptor.getPostConstructs(), InterceptorClassDescription.Builder::setPostConstruct);
        registerLifecycleOverrides(eeModuleDescription, interceptorClassName, interceptor.getPreDestroys(), InterceptorClassDescription.Builder::setPreDestroy);
        registerLifecycleOverrides(eeModuleDescription, interceptorClassName, interceptor.getPrePassivates(), InterceptorClassDescription.Builder::setPrePassivate);
        registerLifecycleOverrides(eeModuleDescription, interceptorClassName, interceptor.getPostActivates(), InterceptorClassDescription.Builder::setPostActivate);
        if (interceptor.getJndiEnvironmentRefsGroup() != null) {
            final DeploymentDescriptorEnvironment environment = new DeploymentDescriptorEnvironment("java:comp/env", interceptor.getJndiEnvironmentRefsGroup());
            eeModuleDescription.addInterceptorEnvironment(interceptor.getInterceptorClass(), new InterceptorEnvironment(environment));
        }
    }
}

/**
 * Registers each lifecycle callback in {@code callbacks} as an interceptor method
 * override; no-op when the descriptor declared no callbacks of this kind.
 * Lifecycle callbacks are identified with a {@code void} return type.
 */
private static void registerLifecycleOverrides(final EEModuleDescription module, final String interceptorClassName, final LifecycleCallbacksMetaData callbacks, final java.util.function.BiConsumer<InterceptorClassDescription.Builder, MethodIdentifier> setter) {
    if (callbacks == null) {
        return;
    }
    for (LifecycleCallbackMetaData callback : callbacks) {
        registerInterceptorOverride(module, interceptorClassName, callback.getClassName(), callback.getMethodName(), void.class, setter);
    }
}

/**
 * Builds an {@link InterceptorClassDescription} holding a single method override and
 * registers it on the module. When the descriptor omits (or leaves empty) the declaring
 * class, the override is attributed to the interceptor's own class.
 *
 * @param module            EE module description to register the override on
 * @param defaultClassName  the interceptor class, used when no class is declared
 * @param declaredClassName class declared in the descriptor entry, possibly null/empty
 * @param methodName        name of the interceptor/callback method
 * @param returnType        Object.class for around-* methods, void.class for lifecycle
 * @param setter            builder mutator selecting which override slot to populate
 */
private static void registerInterceptorOverride(final EEModuleDescription module, final String defaultClassName, final String declaredClassName, final String methodName, final Class<?> returnType, final java.util.function.BiConsumer<InterceptorClassDescription.Builder, MethodIdentifier> setter) {
    final InterceptorClassDescription.Builder builder = InterceptorClassDescription.builder();
    setter.accept(builder, MethodIdentifier.getIdentifier(returnType, methodName, InvocationContext.class));
    final String targetClass = (declaredClassName == null || declaredClassName.isEmpty()) ? defaultClassName : declaredClassName;
    module.addInterceptorMethodOverride(targetClass, builder.build());
}
282557.8819114wildfly
/**
 * Emits the bytecode of one stub method for a dynamically generated IIOP stub class,
 * together with a private static "init" method that constructs the method's
 * {@code StubStrategy} and stores it in a dedicated private static field.
 *
 * The generated method body boxes its arguments into an {@code Object[]}, delegates to
 * {@code superclass.invoke*(idlName, strategy, args)} and unboxes/casts the result.
 *
 * @param asm           class writer for the stub class being generated
 * @param superclass    stub base class whose {@code invoke*} method is invoked
 * @param m             the remote-interface method to generate a stub for
 * @param idlName       IDL operation name passed to {@code invoke}
 * @param strategyField name of the generated static {@code StubStrategy} field
 * @param initMethod    name of the generated static initializer method
 */
private static void generateMethodCode(ClassFile asm, Class<?> superclass, Method m, String idlName, String strategyField, String initMethod) {
    Class<?> returnType = m.getReturnType();
    Class<?>[] paramTypes = m.getParameterTypes();
    Class<?>[] exceptions = m.getExceptionTypes();
    // Per-method holder for the marshalling strategy, populated by the init method below.
    asm.addField(Modifier.PRIVATE + Modifier.STATIC, strategyField, StubStrategy.class);
    final CodeAttribute ca = asm.addMethod(m).getCodeAttribute();
    // Push the receiver and the first two invoke() arguments: this, idlName, strategy.
    ca.aload(0);
    ca.ldc(idlName);
    ca.getstatic(asm.getName(), strategyField, StubStrategy.class);
    if (paramTypes.length == 0) {
        // No parameters: pass an empty Object[].
        ca.iconst(0);
        ca.anewarray(Object.class.getName());
    } else {
        // Allocate Object[paramTypes.length] and store each (boxed) argument into it.
        ca.iconst(paramTypes.length);
        ca.anewarray(Object.class.getName());
        int index = 1; // local-variable slot of the current argument; slot 0 is 'this'
        for (int j = 0; j < paramTypes.length; j++) {
            Class<?> type = paramTypes[j];
            ca.dup();
            ca.iconst(j);
            if (!type.isPrimitive()) {
                ca.aload(index);
            } else if (type.equals(double.class)) {
                // double and long occupy two local slots, hence the extra index++ here
                // in addition to the shared index++ after the if/else chain.
                ca.dload(index);
                Boxing.boxDouble(ca);
                index++;
            } else if (type.equals(long.class)) {
                ca.lload(index);
                Boxing.boxLong(ca);
                index++;
            } else if (type.equals(float.class)) {
                ca.fload(index);
                Boxing.boxFloat(ca);
            } else {
                // Remaining primitives (boolean, byte, char, short, int) all load as int.
                ca.iload(index);
                Boxing.boxIfNessesary(ca, DescriptorUtils.makeDescriptor(type));
            }
            index++;
            ca.aastore();
        }
    }
    // Primitive (non-void) returns use a typed variant, e.g. "invokeInt", "invokeDouble".
    String invoke = "invoke";
    String ret = "Ljava/lang/Object;";
    if (returnType.isPrimitive() && returnType != Void.TYPE) {
        String typeName = returnType.getName();
        invoke += (Character.toUpperCase(typeName.charAt(0)) + typeName.substring(1));
        ret = DescriptorUtils.makeDescriptor(returnType);
    }
    ca.invokevirtual(superclass.getName(), invoke, "(Ljava/lang/String;Lorg/wildfly/iiop/openjdk/rmi/marshal/strategy/StubStrategy;[Ljava/lang/Object;)" + ret);
    // Reference returns from the generic invoke come back as Object and need a checkcast.
    if (!returnType.isPrimitive() && returnType != Object.class) {
        ca.checkcast(returnType);
    }
    ca.returnInstruction();
    // --- init method: StubStrategy.forMethod(paramAbbrevs, exceptionIds, exceptionClasses, returnAbbrev, null) ---
    final CodeAttribute init = asm.addMethod(Modifier.PRIVATE + Modifier.STATIC, initMethod, "V").getCodeAttribute();
    int i;
    int len;
    // String[] of CDR abbreviations, one entry per parameter type.
    len = paramTypes.length;
    init.iconst(len);
    init.anewarray(String.class.getName());
    for (i = 0; i < len; i++) {
        init.dup();
        init.iconst(i);
        init.ldc(CDRStream.abbrevFor(paramTypes[i]));
        init.aastore();
    }
    // Only exceptions NOT assignable to RemoteException are marshalled; count them first.
    len = exceptions.length;
    int n = 0;
    for (i = 0; i < len; i++) {
        if (!RemoteException.class.isAssignableFrom(exceptions[i])) {
            n++;
        }
    }
    // String[] of IDL repository ids for the declared (non-Remote) exceptions.
    init.iconst(n);
    init.anewarray(String.class.getName());
    try {
        int j = 0;
        for (i = 0; i < len; i++) {
            if (!RemoteException.class.isAssignableFrom(exceptions[i])) {
                init.dup();
                init.iconst(j);
                init.ldc(ExceptionAnalysis.getExceptionAnalysis(exceptions[i]).getExceptionRepositoryId());
                init.aastore();
                j++;
            }
        }
    } catch (RMIIIOPViolationException e) {
        // 'i' still indexes the exception whose analysis failed when we get here.
        throw EjbLogger.ROOT_LOGGER.exceptionRepositoryNotFound(exceptions[i].getName(), e.getLocalizedMessage());
    }
    // String[] of Java class names for the same exceptions, in the same order.
    init.iconst(n);
    init.anewarray(String.class.getName());
    int j = 0;
    for (i = 0; i < len; i++) {
        if (!RemoteException.class.isAssignableFrom(exceptions[i])) {
            init.dup();
            init.iconst(j);
            init.ldc(exceptions[i].getName());
            init.aastore();
            j++;
        }
    }
    init.ldc(CDRStream.abbrevFor(returnType));
    // Null ClassLoader argument for forMethod.
    init.aconstNull();
    init.invokestatic(StubStrategy.class.getName(), "forMethod", "([Ljava/lang/String;[Ljava/lang/String;[Ljava/lang/String;Ljava/lang/String;Ljava/lang/ClassLoader;)Lorg/wildfly/iiop/openjdk/rmi/marshal/strategy/StubStrategy;");
    init.putstatic(asm.getName(), strategyField, StubStrategy.class);
    init.returnInstruction();
}
283186.1422106wildfly
/**
 * Translates the resolved mod_cluster resource model into the configuration
 * {@code builder} and records the service dependencies (advertise socket binding,
 * proxy outbound socket bindings, SSL context) to be injected later.
 *
 * The inline parsing of the {@code excluded-contexts} attribute has been extracted
 * into {@link #parseExcludedContexts(String)}; behavior is unchanged.
 *
 * @param context operation context used to resolve attribute expressions
 * @param model   resource model for this mod_cluster proxy
 * @return this configurator, for chaining
 * @throws OperationFailedException if attribute resolution fails or
 *         {@code excluded-contexts} is malformed
 */
public ServiceConfigurator configure(OperationContext context, ModelNode model) throws OperationFailedException {
    // Advertise socket is optional; when present it becomes a socket-binding dependency.
    String advertiseSocket = ADVERTISE_SOCKET.resolveModelAttribute(context, model).asStringOrNull();
    this.advertiseSocketDependency = (advertiseSocket != null) ? new ServiceSupplierDependency<>(CommonUnaryRequirement.SOCKET_BINDING.getServiceName(context, advertiseSocket)) : null;
    this.builder.advertise().setAdvertiseSecurityKey(ADVERTISE_SECURITY_KEY.resolveModelAttribute(context, model).asStringOrNull());
    // Note: socket-timeout is modelled in seconds but consumed in milliseconds (* 1000).
    builder.mcmp().setAdvertise(ADVERTISE.resolveModelAttribute(context, model).asBoolean()).setProxyURL(PROXY_URL.resolveModelAttribute(context, model).asString()).setAutoEnableContexts(AUTO_ENABLE_CONTEXTS.resolveModelAttribute(context, model).asBoolean()).setStopContextTimeout(STOP_CONTEXT_TIMEOUT.resolveModelAttribute(context, model).asInt()).setStopContextTimeoutUnit(TimeUnit.valueOf(STOP_CONTEXT_TIMEOUT.getDefinition().getMeasurementUnit().getName())).setSocketTimeout(SOCKET_TIMEOUT.resolveModelAttribute(context, model).asInt() * 1000).setSessionDrainingStrategy(Enum.valueOf(SessionDrainingStrategyEnum.class, SESSION_DRAINING_STRATEGY.resolveModelAttribute(context, model).asString()));
    if (model.hasDefined(EXCLUDED_CONTEXTS.getName())) {
        String contexts = EXCLUDED_CONTEXTS.resolveModelAttribute(context, model).asString();
        builder.mcmp().setExcludedContextsPerHost(parseExcludedContexts(contexts));
    }
    builder.balancer().setStickySession(STICKY_SESSION.resolveModelAttribute(context, model).asBoolean()).setStickySessionRemove(STICKY_SESSION_REMOVE.resolveModelAttribute(context, model).asBoolean()).setStickySessionForce(STICKY_SESSION_FORCE.resolveModelAttribute(context, model).asBoolean()).setMaxAttempts(MAX_ATTEMPTS.resolveModelAttribute(context, model).asInt());
    // The attributes below are optional: only push them when defined in the model.
    ModelNode node = WORKER_TIMEOUT.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        builder.balancer().setWorkerTimeout(node.asInt());
    }
    builder.node().setFlushPackets(FLUSH_PACKETS.resolveModelAttribute(context, model).asBoolean()).setPing(PING.resolveModelAttribute(context, model).asInt());
    node = FLUSH_WAIT.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        builder.node().setFlushWait(node.asInt());
    }
    node = SMAX.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        builder.node().setSmax(node.asInt());
    }
    node = TTL.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        builder.node().setTtl(node.asInt());
    }
    node = NODE_TIMEOUT.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        builder.node().setNodeTimeout(node.asInt());
    }
    node = BALANCER.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        builder.node().setBalancer(node.asString());
    }
    node = LOAD_BALANCING_GROUP.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        builder.node().setLoadBalancingGroup(node.asString());
    }
    node = PROXIES.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        // Each proxy reference becomes an outbound-socket-binding service dependency.
        for (ModelNode ref : node.asList()) {
            String asString = ref.asString();
            this.outboundSocketBindings.add(new ServiceSupplierDependency<>(CommonUnaryRequirement.OUTBOUND_SOCKET_BINDING.getServiceName(context, asString)));
        }
    }
    node = SSL_CONTEXT.resolveModelAttribute(context, model);
    if (node.isDefined()) {
        this.sslContextDependency = new ServiceSupplierDependency<>(CommonUnaryRequirement.SSL_CONTEXT.getServiceName(context, node.asString()));
    }
    return this;
}

/**
 * Parses the comma-separated {@code excluded-contexts} attribute into a map of host
 * (or {@code null}, meaning every host) to the set of excluded context paths.
 * Each entry has the form {@code context} or {@code host:context}; the special values
 * {@code ROOT} (deprecated spelling) and {@code /} denote the root context ("").
 *
 * @param contexts raw attribute value, may be null or blank
 * @return host-to-paths map, empty when nothing is excluded
 * @throws OperationFailedException when an entry contains more than one ':'
 */
private static Map<String, Set<String>> parseExcludedContexts(String contexts) throws OperationFailedException {
    if (contexts == null) {
        return Collections.emptyMap();
    }
    String trimmedContexts = contexts.trim();
    if (trimmedContexts.isEmpty()) {
        return Collections.emptyMap();
    }
    Map<String, Set<String>> excludedContextsPerHost = new HashMap<>();
    for (String c : trimmedContexts.split(",")) {
        String[] parts = c.trim().split(":");
        if (parts.length > 2) {
            throw ROOT_LOGGER.excludedContextsWrongFormat(trimmedContexts);
        }
        String host = null;
        String trimmedContext = parts[0].trim();
        if (parts.length == 2) {
            host = trimmedContext;
            trimmedContext = parts[1].trim();
        }
        String path;
        switch (trimmedContext) {
            case "ROOT":
                // Deprecated spelling of the root context: warn, then fall through to "/".
                ROOT_LOGGER.excludedContextsUseSlashInsteadROOT();
            case "/":
                path = "";
                break;
            default:
                // Normalize to a leading slash and strip any trailing slash.
                trimmedContext = trimmedContext.startsWith("/") ? trimmedContext : ("/" + trimmedContext);
                path = trimmedContext.endsWith("/") ? trimmedContext.substring(0, trimmedContext.length() - 1) : trimmedContext;
                break;
        }
        excludedContextsPerHost.computeIfAbsent(host, k -> new HashSet<>()).add(path);
    }
    return excludedContextsPerHost;
}
282808.322698wildfly
/**
 * Scans a WAR deployment for JAX-WS POJO endpoints and registers each of them on the
 * JAXWS deployment model.
 *
 * Endpoints that are also declared as servlets in jboss-web metadata take their name
 * and URL pattern from the servlet declaration; the remaining annotated classes derive
 * their URL pattern from webservice metadata or the class-level annotations.
 *
 * @param unit              the deployment unit being processed (must be a WAR)
 * @param moduleDescription EE module description holding the class descriptions
 * @throws DeploymentUnitProcessingException on processing failure
 */
protected void processAnnotation(final DeploymentUnit unit, final EEModuleDescription moduleDescription) throws DeploymentUnitProcessingException {
    if (!DeploymentTypeMarker.isType(DeploymentType.WAR, unit)) {
        return;
    }
    final CompositeIndex index = unit.getAttachment(Attachments.COMPOSITE_ANNOTATION_INDEX);
    // Candidate POJO endpoints: every JAX-WS endpoint class in this module that is not excluded.
    final Map<String, EEModuleClassDescription> candidates = new HashMap<>();
    for (final EEModuleClassDescription description : moduleDescription.getClassDescriptions()) {
        if (isJaxwsEndpoint(description, index) && !exclude(unit, description)) {
            candidates.put(description.getClassName(), description);
        }
    }
    final JBossWebMetaData jbossWebMD = getJBossWebMetaData(unit);
    final JAXWSDeployment jaxwsDeployment = getJaxwsDeployment(unit);
    if (jbossWebMD != null) {
        final Set<String> alreadyMatched = new HashSet<>();
        for (final ServletMetaData servletMD : getServlets(jbossWebMD)) {
            final String endpointClassName = getEndpointClassName(servletMD);
            final String endpointName = getEndpointName(servletMD);
            if (candidates.containsKey(endpointClassName) || alreadyMatched.contains(endpointClassName)) {
                // Servlet-declared endpoint: the servlet name and mapping win over annotations.
                final ComponentDescription pojoComponent = createComponentDescription(unit, endpointName, endpointClassName, endpointName);
                final ServiceName pojoViewName = registerView(pojoComponent, endpointClassName);
                final String urlPattern = getUrlPattern(endpointName, unit);
                jaxwsDeployment.addEndpoint(new POJOEndpoint(endpointName, endpointClassName, pojoViewName, urlPattern));
                candidates.remove(endpointClassName);
                alreadyMatched.add(endpointClassName);
            } else if (unit.getParent() != null && DeploymentTypeMarker.isType(DeploymentType.EAR, unit.getParent())) {
                // The servlet's endpoint class may be packaged in the enclosing EAR instead.
                final EEModuleDescription earModule = unit.getParent().getAttachment(org.jboss.as.ee.component.Attachments.EE_MODULE_DESCRIPTION);
                final CompositeIndex parentIndex = unit.getParent().getAttachment(Attachments.COMPOSITE_ANNOTATION_INDEX);
                for (final EEModuleClassDescription earClass : earModule.getClassDescriptions()) {
                    if (earClass.getClassName().equals(endpointClassName) && isJaxwsEndpoint(earClass, parentIndex)) {
                        final ComponentDescription pojoComponent = createComponentDescription(unit, endpointName, endpointClassName, endpointName);
                        final ServiceName pojoViewName = registerView(pojoComponent, endpointClassName);
                        final String urlPattern = getUrlPattern(endpointName, unit);
                        jaxwsDeployment.addEndpoint(new POJOEndpoint(endpointName, endpointClassName, pojoViewName, urlPattern));
                    }
                }
            }
        }
    }
    // Remaining candidates have no servlet mapping: derive the URL pattern from metadata.
    for (final EEModuleClassDescription description : candidates.values()) {
        ClassInfo classInfo = null;
        String serviceName = null;
        String urlPattern = null;
        // First preference: a port-component URI from jboss webservice metadata.
        final EJBEndpoint ejbEndpoint = getWebserviceMetadataEJBEndpoint(jaxwsDeployment, description.getClassName());
        if (ejbEndpoint != null) {
            urlPattern = UrlPatternUtils.getUrlPatternByPortComponentURI(getJBossWebserviceMetaDataPortComponent(unit, ejbEndpoint.getName()));
        }
        // Second preference: an explicit @WebContext urlPattern.
        if (urlPattern == null) {
            final ClassAnnotationInformation<WebContext, WebContextAnnotationInfo> webContextInfo = description.getAnnotationInformation(WebContext.class);
            if (webContextInfo != null) {
                final WebContextAnnotationInfo info = webContextInfo.getClassLevelAnnotations().get(0);
                if (info != null && info.getUrlPattern().length() > 0) {
                    urlPattern = info.getUrlPattern();
                }
            }
        }
        // Fall back to a pattern derived from @WebService / @WebServiceProvider.
        if (urlPattern == null) {
            final ClassAnnotationInformation<WebService, WebServiceAnnotationInfo> webServiceInfo = description.getAnnotationInformation(WebService.class);
            if (webServiceInfo != null) {
                final WebServiceAnnotationInfo info = webServiceInfo.getClassLevelAnnotations().get(0);
                serviceName = info.getServiceName();
                classInfo = (ClassInfo) info.getTarget();
                urlPattern = UrlPatternUtils.getUrlPattern(classInfo.name().local(), serviceName);
                // Disambiguate with the port name when the pattern is already taken.
                if (jaxwsDeployment.contains(urlPattern)) {
                    urlPattern = UrlPatternUtils.getUrlPattern(classInfo.name().local(), serviceName, info.getName());
                }
            }
            final ClassAnnotationInformation<WebServiceProvider, WebServiceProviderAnnotationInfo> providerInfo = description.getAnnotationInformation(WebServiceProvider.class);
            if (providerInfo != null) {
                final WebServiceProviderAnnotationInfo info = providerInfo.getClassLevelAnnotations().get(0);
                serviceName = info.getServiceName();
                classInfo = (ClassInfo) info.getTarget();
            }
        }
        if (classInfo != null) {
            final String endpointClassName = description.getClassName();
            final ComponentDescription pojoComponent = createComponentDescription(unit, endpointClassName, endpointClassName, endpointClassName);
            final ServiceName pojoViewName = registerView(pojoComponent, endpointClassName);
            if (urlPattern == null) {
                urlPattern = UrlPatternUtils.getUrlPattern(classInfo.name().local(), serviceName);
            }
            jaxwsDeployment.addEndpoint(new POJOEndpoint(endpointClassName, pojoViewName, UrlPatternUtils.getUrlPattern(urlPattern)));
        }
    }
}
292686.972795cassandra
/**
 * Post-commit hook reconciling gossip state and the legacy system peer tables with a
 * newly committed {@code ClusterMetadata} version.
 *
 * @param prev         metadata before the commit
 * @param next         metadata after the commit
 * @param fromSnapshot true when replaying from a snapshot (forces a full refresh)
 */
public void notifyPostCommit(ClusterMetadata prev, ClusterMetadata next, boolean fromSnapshot) {
    // Skip when neither the directory (membership) nor the token map changed;
    // snapshot replays always run.
    if (!fromSnapshot && next.directory.lastModified().equals(prev.directory.lastModified()) && next.tokenMap.lastModified().equals(prev.tokenMap.lastModified()))
        return;
    // Endpoints present before but absent now: evict them from gossip and peer tables.
    Set<InetAddressAndPort> removedAddr = Sets.difference(new HashSet<>(prev.directory.allAddresses()), new HashSet<>(next.directory.allAddresses()));
    // Nodes whose directory entry or token ownership changed between the two versions.
    Set<NodeId> changed = new HashSet<>();
    for (NodeId node : next.directory.peerIds()) {
        if (directoryEntryChangedFor(node, prev.directory, next.directory) || !prev.tokenMap.tokens(node).equals(next.tokenMap.tokens(node)))
            changed.add(node);
    }
    for (InetAddressAndPort remove : removedAddr) {
        GossipHelper.evictFromMembership(remove);
        PeersTable.removeFromSystemPeersTables(remove);
    }
    for (NodeId change : changed) {
        // Extra handling when the changed node is THIS node.
        if (next.myNodeId() != null && next.myNodeId().equals(change)) {
            switch(next.directory.peerState(change)) {
                case BOOTSTRAPPING:
                    // Log only on the transition INTO the bootstrapping state.
                    if (prev.directory.peerState(change) != BOOTSTRAPPING) {
                        logger.info("JOINING: Starting to bootstrap");
                        logger.info("JOINING: calculation complete, ready to bootstrap");
                    }
                    break;
                case BOOT_REPLACING:
                case REGISTERED:
                    // No local side effects for these states.
                    break;
                case JOINED:
                    // Local node finished joining: persist tokens, refresh local gossip
                    // state and run pre-join index tasks for user keyspaces.
                    SystemKeyspace.updateTokens(next.directory.endpoint(change), next.tokenMap.tokens(change));
                    Gossiper.instance.maybeInitializeLocalState(SystemKeyspace.incrementAndGetGeneration());
                    StreamSupport.stream(ColumnFamilyStore.all().spliterator(), false).filter(cfs -> Schema.instance.getUserKeyspaces().names().contains(cfs.keyspace.getName())).forEach(cfs -> cfs.indexManager.executePreJoinTasksBlocking(true));
                    if (prev.directory.peerState(change) == MOVING)
                        logger.info("Node {} state jump to NORMAL", next.directory.endpoint(change));
                    break;
            }
            // Regardless of state: make sure local gossip state exists and advertise
            // the (possibly updated) schema version.
            Gossiper.instance.maybeInitializeLocalState(SystemKeyspace.incrementAndGetGeneration());
            Gossiper.instance.addLocalApplicationState(SCHEMA, StorageService.instance.valueFactory.schema(next.schema.getVersion()));
        }
        if (next.directory.peerState(change) == LEFT) {
            // Node left the ring: gossip its last-known (pre-departure) tokens, update the
            // legacy peer table and drop it from gossip unless it is this node.
            Gossiper.instance.mergeNodeToGossip(change, next, prev.tokenMap.tokens(change));
            InetAddressAndPort endpoint = prev.directory.endpoint(change);
            if (endpoint != null) {
                PeersTable.updateLegacyPeerTable(change, prev, next);
                if (!endpoint.equals(FBUtilities.getBroadcastAddressAndPort()))
                    GossipHelper.removeFromGossip(endpoint);
            }
        } else if (next.directory.peerState(change) == MOVING) {
            // While moving, gossip still reflects the pre-move token set.
            logger.debug("Node {} state MOVING, tokens {}", next.directory.endpoint(change), prev.tokenMap.tokens(change));
            Gossiper.instance.mergeNodeToGossip(change, next);
            PeersTable.updateLegacyPeerTable(change, prev, next);
        } else if (NodeState.isBootstrap(next.directory.peerState(change))) {
            // Bootstrapping peers advertise the tokens recorded by their in-progress operation.
            Collection<Token> tokens = GossipHelper.getTokensFromOperation(change, next);
            Gossiper.instance.mergeNodeToGossip(change, next, tokens);
        } else if (prev.directory.peerState(change) == BOOT_REPLACING) {
            // A replacement just completed: this branch only logs the token hand-over.
            MultiStepOperation<?> sequence = prev.inProgressSequences.get(change);
            if (sequence != null && sequence.kind() == MultiStepOperation.Kind.REPLACE) {
                BootstrapAndReplace replace = (BootstrapAndReplace) sequence;
                InetAddressAndPort replaced = prev.directory.endpoint(replace.startReplace.replaced());
                InetAddressAndPort replacement = prev.directory.endpoint(change);
                Collection<Token> tokens = GossipHelper.getTokensFromOperation(replace);
                logger.info("Node {} will complete replacement of {} for tokens {}", replacement, replaced, tokens);
                if (!replacement.equals(replaced)) {
                    for (Token token : tokens) logger.warn("Token {} changing ownership from {} to {}", token, replaced, replacement);
                }
            }
        } else {
            // Default: propagate the node's current state and tokens to gossip/peer tables.
            Gossiper.instance.mergeNodeToGossip(change, next);
            PeersTable.updateLegacyPeerTable(change, prev, next);
        }
    }
}
292632.5912110cassandra
/**
 * Prints a human-readable status summary for the local node: identity, gossip and
 * transport state, load, memory, caches, tokens and bootstrap/decommission state.
 *
 * Metrics whose MBean may be absent on this node are printed through
 * {@link #printIfMBeanPresent(Runnable)} so a missing MBean is silently skipped.
 *
 * @param probe JMX probe connected to the node being inspected
 */
public void execute(NodeProbe probe) {
    final PrintStream out = probe.output().out;
    final boolean gossipRunning = probe.isGossipRunning();
    out.printf("%-23s: %s%n", "ID", probe.getLocalHostId());
    out.printf("%-23s: %s%n", "Gossip active", gossipRunning);
    out.printf("%-23s: %s%n", "Native Transport active", probe.isNativeTransportRunning());
    out.printf("%-23s: %s%n", "Load", probe.getLoadString());
    out.printf("%-23s: %s%n", "Uncompressed load", probe.getUncompressedLoadString());
    // A generation number is only meaningful while gossip is running.
    out.printf("%-23s: %s%n", "Generation No", gossipRunning ? probe.getCurrentGenerationNumber() : 0);
    out.printf("%-23s: %d%n", "Uptime (seconds)", probe.getUptime() / 1000);
    final MemoryUsage heap = probe.getHeapMemoryUsage();
    final double usedMb = (double) heap.getUsed() / (1024 * 1024);
    final double maxMb = (double) heap.getMax() / (1024 * 1024);
    out.printf("%-23s: %.2f / %.2f%n", "Heap Memory (MB)", usedMb, maxMb);
    printIfMBeanPresent(() -> out.printf("%-23s: %.2f%n", "Off Heap Memory (MB)", getOffHeapMemoryUsed(probe)));
    out.printf("%-23s: %s%n", "Data Center", probe.getDataCenter());
    out.printf("%-23s: %s%n", "Rack", probe.getRack());
    out.printf("%-23s: %s%n", "Exceptions", probe.getStorageMetric("Exceptions"));
    final CacheServiceMBean cacheService = probe.getCacheServiceMBean();
    printCacheStats(out, probe, "Key Cache", "KeyCache", cacheService.getKeyCacheSavePeriodInSeconds());
    printCacheStats(out, probe, "Row Cache", "RowCache", cacheService.getRowCacheSavePeriodInSeconds());
    printCacheStats(out, probe, "Counter Cache", "CounterCache", cacheService.getCounterCacheSavePeriodInSeconds());
    printIfMBeanPresent(() -> out.printf("%-23s: entries %d, size %s, capacity %s, %d misses, %d requests, %.3f recent hit rate, %.3f %s miss latency%n", "Chunk Cache", probe.getCacheMetric("ChunkCache", "Entries"), FileUtils.stringifyFileSize((long) probe.getCacheMetric("ChunkCache", "Size")), FileUtils.stringifyFileSize((long) probe.getCacheMetric("ChunkCache", "Capacity")), probe.getCacheMetric("ChunkCache", "Misses"), probe.getCacheMetric("ChunkCache", "Requests"), probe.getCacheMetric("ChunkCache", "HitRate"), probe.getCacheMetric("ChunkCache", "MissLatency"), probe.getCacheMetric("ChunkCache", "MissLatencyUnit")));
    printIfMBeanPresent(() -> out.printf("%-23s: size %s, overflow size: %s, capacity %s%n", "Network Cache", FileUtils.stringifyFileSize((long) probe.getBufferPoolMetric("networking", "Size")), FileUtils.stringifyFileSize((long) probe.getBufferPoolMetric("networking", "OverflowSize")), FileUtils.stringifyFileSize((long) probe.getBufferPoolMetric("networking", "Capacity"))));
    out.printf("%-23s: %s%%%n", "Percent Repaired", probe.getColumnFamilyMetric(null, null, "PercentRepaired"));
    if (!probe.isJoined()) {
        out.printf("%-23s: (node is not joined to the cluster)%n", "Token");
    } else {
        final List<String> nodeTokens = probe.getTokens();
        // Print each token when there is only one or when -T/--tokens was requested.
        if (nodeTokens.size() == 1 || this.tokens) {
            for (String token : nodeTokens) {
                out.printf("%-23s: %s%n", "Token", token);
            }
        } else {
            out.printf("%-23s: (invoke with -T/--tokens to see all %d tokens)%n", "Token", nodeTokens.size());
        }
    }
    out.printf("%-23s: %s%n", "Bootstrap state", probe.getStorageService().getBootstrapState());
    out.printf("%-23s: %s%n", "Bootstrap failed", probe.getStorageService().isBootstrapFailed());
    out.printf("%-23s: %s%n", "Decommissioning", probe.getStorageService().isDecommissioning());
    out.printf("%-23s: %s%n", "Decommission failed", probe.getStorageService().isDecommissionFailed());
}

/** Prints one line of key/row/counter-style cache statistics for the named cache. */
private static void printCacheStats(PrintStream out, NodeProbe probe, String label, String cacheName, long savePeriodInSeconds) {
    out.printf("%-23s: entries %d, size %s, capacity %s, %d hits, %d requests, %.3f recent hit rate, %d save period in seconds%n", label, probe.getCacheMetric(cacheName, "Entries"), FileUtils.stringifyFileSize((long) probe.getCacheMetric(cacheName, "Size")), FileUtils.stringifyFileSize((long) probe.getCacheMetric(cacheName, "Capacity")), probe.getCacheMetric(cacheName, "Hits"), probe.getCacheMetric(cacheName, "Requests"), probe.getCacheMetric(cacheName, "HitRate"), savePeriodInSeconds);
}

/** Runs the given printer, rethrowing everything except a missing-MBean failure. */
private static void printIfMBeanPresent(Runnable printer) {
    try {
        printer.run();
    } catch (RuntimeException e) {
        if (!(e.getCause() instanceof InstanceNotFoundException))
            throw e;
    }
}
293648.3111109cassandra
/**
 * Drives a randomized model-checking simulation of cluster membership changes
 * (seed bootstrap, join, leave, move, replace, operation cancellation and
 * snapshot commits) against a simulated cluster-metadata service, validating
 * token/replica placements after every step and once more when the target
 * cluster size is reached with no operations in flight.
 *
 * @param toBootstrap target number of member nodes before the run may exit
 * @param minSteps    minimum number of model-checker steps to execute
 * @param rf          replication factor whose placements are validated
 * @param concurrency maximum number of concurrently in-flight operations
 * @throws Throwable on invariant violation or simulated-service failure
 */
public void simulate(int toBootstrap, int minSteps, ReplicationFactor rf, int concurrency) throws Throwable {
    logger.info("RUNNING SIMULATION WITH SEED {}. TO BOOTSTRAP: {}, RF: {}, CONCURRENCY: {}", seed, toBootstrap, rf, concurrency);
    long startTime = System.currentTimeMillis();
    ModelChecker<ModelState, CMSSut> modelChecker = new ModelChecker<>();
    ClusterMetadataService.unsetInstance();
    // Seeding step (runs only while the cluster is empty): for each datacenter,
    // register nodes and join them without bootstrap streaming.
    modelChecker.init(ModelState.empty(nodeFactory(), toBootstrap, concurrency), new CMSSut(AtomicLongBackedProcessor::new, false, rf)).step((state, sut) -> state.currentNodes.isEmpty(), (state, sut, entropySource) -> {
        for (Map.Entry<String, DCReplicas> e : rf.asMap().entrySet()) {
            int dcRf = e.getValue().totalCount;
            // DC index is parsed from names of the form "datacenterN".
            int dc = Integer.parseInt(e.getKey().replace("datacenter", ""));
            // NOTE(review): seeds dcRf + 1 nodes per DC — presumably one node
            // beyond the DC's replication factor; confirm this is intentional.
            for (int i = 0; i < dcRf + 1; i++) {
                ModelChecker.Pair<ModelState, Node> registration = registerNewNode(state, sut, dc, 1);
                state = SimulatedOperation.joinWithoutBootstrap(registration.l, sut, registration.r);
            }
        }
        return new ModelChecker.Pair<>(state, sut);
    // Join step: once RF-many unique nodes exist and more should bootstrap,
    // join a previously registered node if one is queued, else register a new one.
    }).step((state, sut) -> state.uniqueNodes >= rf.total() && state.shouldBootstrap(), (state, sut, entropySource) -> {
        // NOTE(review): for N datacenters this picks a dc in [1, N-1], so the
        // highest-numbered DC is never chosen here — confirm this is intended.
        int dc = rf.asMap().size() == 1 ? 1 : entropySource.nextInt(rf.asMap().size() - 1) + 1;
        Node toAdd;
        if (!state.registeredNodes.isEmpty()) {
            toAdd = state.registeredNodes.remove(0);
        } else {
            ModelChecker.Pair<ModelState, Node> registration = registerNewNode(state, sut, dc, 1);
            state = registration.l;
            toAdd = registration.r;
        }
        return new ModelChecker.Pair<>(SimulatedOperation.join(sut, state, toAdd), sut);
    // Leave step: decommission a randomly chosen removal candidate.
    }).step((state, sut) -> state.shouldLeave(rf, rng), (state, sut, entropySource) -> {
        Node toRemove = getRemovalCandidate(state, entropySource);
        return new ModelChecker.Pair<>(SimulatedOperation.leave(sut, state, toRemove), sut);
    // Move step: move a candidate node to a newly generated token.
    }).step((state, sut) -> state.shouldMove(rf, rng), (state, sut, entropySource) -> {
        Node toMove = getMoveCandidate(state, entropySource);
        return new ModelChecker.Pair<>(SimulatedOperation.move(sut, state, toMove, toMove.withNewToken()), sut);
    // Replace step: register a replacement at the same token/DC/rack and
    // replace the candidate with it.
    }).step((state, sut) -> state.shouldReplace(rf, rng), (state, sut, entropySource) -> {
        Node toReplace = getRemovalCandidate(state, entropySource);
        ModelChecker.Pair<ModelState, Node> registration = registerNewNode(state, sut, toReplace.tokenIdx(), toReplace.dcIdx(), toReplace.rackIdx());
        state = registration.l;
        Node replacement = registration.r;
        return new ModelChecker.Pair<>(SimulatedOperation.replace(sut, state, toReplace, replacement), sut);
    // Cancel step: abort a randomly chosen in-flight operation.
    }).step((state, sut) -> state.shouldCancel(rng) && !state.inFlightOperations.isEmpty(), (state, sut, entropySource) -> {
        int idx = entropySource.nextInt(state.inFlightOperations.size());
        ModelState.Transformer transformer = state.transformer();
        SimulatedOperation oldOperationState = state.inFlightOperations.get(idx);
        oldOperationState.cancel(sut, state.simulatedPlacements, transformer);
        return pair(transformer.transform(), sut);
    // Advance step: progress a randomly chosen in-flight operation one phase.
    }).step((state, sut) -> !state.inFlightOperations.isEmpty(), (state, sut, entropySource) -> {
        int idx = entropySource.nextInt(state.inFlightOperations.size());
        SimulatedPlacements simulatedState = state.simulatedPlacements;
        ModelState.Transformer transformer = state.transformer();
        SimulatedOperation oldOperationState = state.inFlightOperations.get(idx);
        oldOperationState.advance(simulatedState, transformer);
        return pair(transformer.transform(), sut);
    // Snapshot step (~5% of steps): trigger a metadata snapshot; a concurrent
    // seal of the same period is tolerated and asserted on by message.
    }).step((state, sut) -> rng.nextDouble() < 0.05, (state, sut, entropySource) -> {
        try {
            sut.service.commit(TriggerSnapshot.instance);
        } catch (IllegalStateException e) {
            Assert.assertTrue(e.getMessage().contains("Have just sealed this period"));
        }
        return pair(state, sut);
    // Invariant: placements must validate whenever the cluster is non-empty.
    }).invariant((state, sut) -> {
        if (state.currentNodes.size() > 0)
            validatePlacements(sut, state);
        return true;
    // Exit: target size reached and nothing in flight — final validation,
    // close the SUT, and log run statistics.
    }).exitCondition((state, sut) -> {
        if (state.currentNodes.size() >= toBootstrap && state.inFlightOperations.size() == 0) {
            validatePlacementsFinal(sut, state);
            sut.close();
            logger.info("(RF: {}, CONCURRENCY: {}, RUN TIME: {}ms) - " + "REGISTERED: {}, CURRENT SIZE: {}, REJECTED {}, INFLIGHT: {} " + "FINISHED  (join,replace,leave,move): {} " + "CANCELLED (join,replace,leave,move): {} ", sut.rf.total(), concurrency, System.currentTimeMillis() - startTime, state.uniqueNodes, state.currentNodes.size(), state.rejected, state.inFlightOperations.size(), Arrays.toString(state.finished), Arrays.toString(state.cancelled));
            return true;
        }
        return false;
    }).run(minSteps, Integer.MAX_VALUE);
}
293090.01126cassandra
/**
 * Exercises the CALLED ON NULL INPUT / RETURNS NULL ON NULL INPUT contract for
 * user-defined aggregates: state and final functions that reject or accept
 * nulls, an always-null state function, and the resulting aggregate values
 * over null rows, single rows, and multi-row partitions.
 *
 * @throws Throwable on CQL execution or assertion failure
 */
public void testCalledOnNullInput() throws Throwable {
    // State/final UDFs covering each null-handling declaration.
    String fStateNonNull = createFunction(KEYSPACE, "int, int", "CREATE OR REPLACE FUNCTION %s(state int, val int) " + "RETURNS NULL ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS 'return Integer.valueOf(state + val);';");
    String fStateNull = createFunction(KEYSPACE, "int, int", "CREATE OR REPLACE FUNCTION %s(state int, val int) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS 'return Integer.valueOf(" + "   (state != null ? state.intValue() : 0) " + "   + (val != null ? val.intValue() : 0));';");
    String fStateAlwaysNull = createFunction(KEYSPACE, "int, int", "CREATE OR REPLACE FUNCTION %s(state int, val int) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS 'return null;';");
    String fFinalNonNull = createFunction(KEYSPACE, "int", "CREATE OR REPLACE FUNCTION %s(state int) " + "RETURNS NULL ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS 'return Integer.valueOf(state);';");
    String fFinalNull = createFunction(KEYSPACE, "int", "CREATE OR REPLACE FUNCTION %s(state int) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS 'return state;';");
    // An aggregate whose state function rejects nulls must declare an INITCOND;
    // without one the CREATE AGGREGATE is rejected.
    assertInvalid("CREATE AGGREGATE " + KEYSPACE + ".invAggr(int) " + "SFUNC " + shortFunctionName(fStateNonNull) + " " + "STYPE int");
    assertInvalid("CREATE AGGREGATE " + KEYSPACE + ".invAggr(int) " + "SFUNC " + shortFunctionName(fStateNonNull) + " " + "STYPE int " + "FINALFUNC " + shortFunctionName(fFinalNonNull));
    // Valid aggregates for every combination of state/final null handling.
    String aStateNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateNull) + " " + "STYPE int");
    String aStateNullFinalNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateNull) + " " + "STYPE int " + "FINALFUNC " + shortFunctionName(fFinalNull));
    String aStateNullFinalNonNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateNull) + " " + "STYPE int " + "FINALFUNC " + shortFunctionName(fFinalNonNull));
    String aStateNonNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateNonNull) + " " + "STYPE int " + "INITCOND 0");
    String aStateNonNullFinalNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateNonNull) + " " + "STYPE int " + "FINALFUNC " + shortFunctionName(fFinalNull) + " " + "INITCOND 0");
    String aStateNonNullFinalNonNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateNonNull) + " " + "STYPE int " + "FINALFUNC " + shortFunctionName(fFinalNonNull) + " " + "INITCOND 0");
    String aStateAlwaysNullFinalNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateAlwaysNull) + " " + "STYPE int " + "FINALFUNC " + shortFunctionName(fFinalNull));
    String aStateAlwaysNullFinalNonNull = createAggregate(KEYSPACE, "int", "CREATE AGGREGATE %s(int) " + "SFUNC " + shortFunctionName(fStateAlwaysNull) + " " + "STYPE int " + "FINALFUNC " + shortFunctionName(fFinalNonNull));
    // Fixture: key 0 has a null value; keys 1..3 hold 1..3.
    createTable("CREATE TABLE %s (key int PRIMARY KEY, i int)");
    execute("INSERT INTO %s (key, i) VALUES (0, null)");
    execute("INSERT INTO %s (key, i) VALUES (1, 1)");
    execute("INSERT INTO %s (key, i) VALUES (2, 2)");
    execute("INSERT INTO %s (key, i) VALUES (3, 3)");
    // Aggregating a single null value yields 0 (null treated as 0 or skipped),
    // except for the always-null state function, which yields null.
    assertRows(execute("SELECT " + aStateNull + "(i) FROM %s WHERE key = 0"), row(0));
    assertRows(execute("SELECT " + aStateNullFinalNull + "(i) FROM %s WHERE key = 0"), row(0));
    assertRows(execute("SELECT " + aStateNullFinalNonNull + "(i) FROM %s WHERE key = 0"), row(0));
    assertRows(execute("SELECT " + aStateNonNull + "(i) FROM %s WHERE key = 0"), row(0));
    assertRows(execute("SELECT " + aStateNonNullFinalNull + "(i) FROM %s WHERE key = 0"), row(0));
    assertRows(execute("SELECT " + aStateNonNullFinalNonNull + "(i) FROM %s WHERE key = 0"), row(0));
    assertRows(execute("SELECT " + aStateAlwaysNullFinalNull + "(i) FROM %s WHERE key = 0"), row(new Object[] { null }));
    assertRows(execute("SELECT " + aStateAlwaysNullFinalNonNull + "(i) FROM %s WHERE key = 0"), row(new Object[] { null }));
    // A single non-null value aggregates to itself.
    assertRows(execute("SELECT " + aStateNull + "(i) FROM %s WHERE key = 1"), row(1));
    assertRows(execute("SELECT " + aStateNullFinalNull + "(i) FROM %s WHERE key = 1"), row(1));
    assertRows(execute("SELECT " + aStateNullFinalNonNull + "(i) FROM %s WHERE key = 1"), row(1));
    assertRows(execute("SELECT " + aStateNonNull + "(i) FROM %s WHERE key = 1"), row(1));
    assertRows(execute("SELECT " + aStateNonNullFinalNull + "(i) FROM %s WHERE key = 1"), row(1));
    assertRows(execute("SELECT " + aStateNonNullFinalNonNull + "(i) FROM %s WHERE key = 1"), row(1));
    assertRows(execute("SELECT " + aStateAlwaysNullFinalNull + "(i) FROM %s WHERE key = 1"), row(new Object[] { null }));
    assertRows(execute("SELECT " + aStateAlwaysNullFinalNonNull + "(i) FROM %s WHERE key = 1"), row(new Object[] { null }));
    // Multiple non-null values sum to 6.
    assertRows(execute("SELECT " + aStateNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(6));
    assertRows(execute("SELECT " + aStateNullFinalNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(6));
    assertRows(execute("SELECT " + aStateNullFinalNonNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(6));
    assertRows(execute("SELECT " + aStateNonNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(6));
    assertRows(execute("SELECT " + aStateNonNullFinalNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(6));
    assertRows(execute("SELECT " + aStateNonNullFinalNonNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(6));
    assertRows(execute("SELECT " + aStateAlwaysNullFinalNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(new Object[] { null }));
    assertRows(execute("SELECT " + aStateAlwaysNullFinalNonNull + "(i) FROM %s WHERE key IN (1, 2, 3)"), row(new Object[] { null }));
}
292972.991133cassandra
/**
 * Exercises user-defined functions over the vector type: vector arguments and
 * returns, vectors nested in lists and UDTs, element access, size, literal
 * construction, and the error cases (wrong argument types/arity, wrong element
 * counts, and zero-dimension vectors).
 *
 * @throws Throwable on CQL execution or assertion failure
 */
public void udf() throws Throwable {
    // Fixture: one row whose 'value' column is the vector [1, 2].
    createTable(KEYSPACE, "CREATE TABLE %s (pk int primary key, value vector<int, 2>)");
    Vector<Integer> vector = vector(1, 2);
    execute("INSERT INTO %s (pk, value) VALUES (0, ?)", vector);
    // Identity function on vector<int, 2>.
    String f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s (x vector<int, 2>) " + "CALLED ON NULL INPUT " + "RETURNS vector<int, 2> " + "LANGUAGE java " + "AS 'return x;'");
    assertRows(execute(format("SELECT %s(value) FROM %%s", f)), row(vector));
    assertRows(execute(format("SELECT %s([2, 3]) FROM %%s", f)), row(vector(2, 3)));
    assertRows(execute(format("SELECT %s(null) FROM %%s", f)), row((Vector<Integer>) null));
    // Identity function on list<vector<int, 2>>.
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s (x list<vector<int, 2>>) " + "CALLED ON NULL INPUT " + "RETURNS list<vector<int, 2>> " + "LANGUAGE java " + "AS 'return x;'");
    assertRows(execute(format("SELECT %s([value]) FROM %%s", f)), row(list(vector)));
    assertRows(execute(format("SELECT %s([[2, 3]]) FROM %%s", f)), row(list(vector(2, 3))));
    assertRows(execute(format("SELECT %s(null) FROM %%s", f)), row((Vector<Integer>) null));
    // Identity function on vector<text, 2>.
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s (x vector<text, 2>) " + "CALLED ON NULL INPUT " + "RETURNS vector<text, 2> " + "LANGUAGE java " + "AS 'return x;'");
    assertRows(execute(format("SELECT %s(['abc', 'defghij']) FROM %%s", f)), row(vector("abc", "defghij")));
    assertRows(execute(format("SELECT %s(null) FROM %%s", f)), row((Vector<Integer>) null));
    // Element access by index.
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s (x vector<int, 2>, i int) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java " + "AS 'return x == null ? null : x.get(i);'");
    assertRows(execute(format("SELECT %s(value, 0), %<s(value, 1) FROM %%s", f)), row(1, 2));
    assertRows(execute(format("SELECT %s([2, 3], 0), %<s([2, 3], 1) FROM %%s", f)), row(2, 3));
    assertRows(execute(format("SELECT %s(null, 0) FROM %%s", f)), row((Integer) null));
    // Vector size (0 for null input).
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s (x vector<int, 2>) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java " + "AS 'return x == null ? 0 : x.size();'");
    assertRows(execute(format("SELECT %s(value) FROM %%s", f)), row(2));
    assertRows(execute(format("SELECT %s([2, 3]) FROM %%s", f)), row(2));
    assertRows(execute(format("SELECT %s(null) FROM %%s", f)), row(0));
    // No-argument functions returning vectors built as Java lists.
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s () " + "CALLED ON NULL INPUT " + "RETURNS vector<double, 3> " + "LANGUAGE java " + "AS 'return Arrays.asList(1.3, 2.2, 3.1);'");
    assertRows(execute(format("SELECT %s() FROM %%s", f)), row(vector(1.3, 2.2, 3.1)));
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s () " + "CALLED ON NULL INPUT " + "RETURNS vector<text, 3> " + "LANGUAGE java " + "AS 'return Arrays.asList(\"a\", \"bc\", \"def\");'");
    assertRows(execute(format("SELECT %s() FROM %%s", f)), row(vector("a", "bc", "def")));
    // Concatenation of two vector<int, 2> into vector<int, 4>.
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s (x vector<int, 2>, y vector<int, 2>) " + "CALLED ON NULL INPUT " + "RETURNS vector<int, 4> " + "LANGUAGE java " + "AS '" + "if (x == null || y == null) return null;" + "List<Integer> l = new ArrayList<Integer>(x); " + "l.addAll(y); " + "return l;'");
    assertRows(execute(format("SELECT %s(value, [3, 4]) FROM %%s", f)), row(vector(1, 2, 3, 4)));
    assertRows(execute(format("SELECT %s([2, 3], value) FROM %%s", f)), row(vector(2, 3, 1, 2)));
    assertRows(execute(format("SELECT %s(null, null) FROM %%s", f)), row((Vector<Integer>) null));
    // Error cases: wrong argument types, wrong arity, wrong element counts,
    // and zero-dimension vectors in function signatures.
    assertInvalidThrowMessage("cannot be passed as argument 0 of function " + f, InvalidRequestException.class, format("SELECT %s((int) 0, [3, 4]) FROM %%s", f));
    assertInvalidThrowMessage("cannot be passed as argument 1 of function " + f, InvalidRequestException.class, format("SELECT %s([1, 2], (int) 0) FROM %%s", f));
    assertInvalidThrowMessage("Invalid number of arguments in call to function " + f, InvalidRequestException.class, format("SELECT %s([1, 2]) FROM %%s", f));
    assertInvalidThrowMessage("Invalid number of arguments in call to function " + f, InvalidRequestException.class, format("SELECT %s([1, 2], [3, 4], [5, 6]) FROM %%s", f));
    assertInvalidThrowMessage("Unable to create a vector selector of type vector<int, 2> from 3 elements", InvalidRequestException.class, format("SELECT %s([1, 2, 3], [4, 5, 6]) FROM %%s", f));
    assertInvalidThrowMessage("vectors may only have positive dimensions; given 0", InvalidRequestException.class, "CREATE FUNCTION %s (x vector<int, 0>) " + "CALLED ON NULL INPUT " + "RETURNS vector<int, 2> " + "LANGUAGE java " + "AS 'return x;'");
    assertInvalidThrowMessage("vectors may only have positive dimensions; given 0", InvalidRequestException.class, "CREATE FUNCTION %s (x vector<int, 2>) " + "CALLED ON NULL INPUT " + "RETURNS vector<int, 0> " + "LANGUAGE java " + "AS 'return x;'");
    // Vectors nested inside a user-defined type: rewrite element 0 to 7.
    String udt = createType("CREATE TYPE %s (v vector<int,2>)");
    alterTable("ALTER TABLE %s ADD udt " + udt);
    execute("INSERT INTO %s (pk, udt) VALUES (0, ?)", userType("v", vector));
    f = createFunction(KEYSPACE, "", "CREATE FUNCTION %s (udt " + udt + ") " + "CALLED ON NULL INPUT " + "RETURNS " + udt + ' ' + "LANGUAGE java " + "AS '" + "if (udt == null) return null;" + "List<Integer> v = new ArrayList<Integer>(udt.getVector(\"v\", Integer.class));" + "v.set(0, 7);" + "return udt.setVector(\"v\", v);'");
    assertRows(execute(format("SELECT %s(udt) FROM %%s", f)), row(userType("v", vector(7, 2))));
    assertRows(execute(format("SELECT %s({v: [10, 20]}) FROM %%s", f)), row(userType("v", vector(7, 20))));
    assertRows(execute(format("SELECT %s(null) FROM %%s", f)), row((Object) null));
    execute("DROP FUNCTION " + f);
}
292492.937120cassandra
/**
 * Verifies PER PARTITION LIMIT semantics in the presence of static columns and
 * paging: first over partitions containing only static data (each partition
 * surfaces one static-only row), then over partitions with regular rows, across
 * every page size from 1 to 7. Also checks the rejection of non-positive limits
 * and of PER PARTITION LIMIT combined with DISTINCT or aggregate queries.
 *
 * @throws Throwable on CQL execution or assertion failure
 */
public void testPerPartitionLimitWithStaticDataAndPaging() throws Throwable {
    // The table DDL is used exactly once, so pass it directly instead of
    // going through a single-use local variable.
    createTable("CREATE TABLE %s (a int, b int, s int static, c int, PRIMARY KEY (a, b))");
    // Phase 1: static-only data — each partition has a static value but no
    // regular rows, so every partition yields a single static row.
    for (int i = 0; i < 5; i++) {
        execute("INSERT INTO %s (a, s) VALUES (?, ?)", i, i);
    }
    for (int pageSize = 1; pageSize < 8; pageSize++) {
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER PARTITION LIMIT 2", pageSize), row(0, null, 0, null), row(1, null, 1, null), row(2, null, 2, null), row(3, null, 3, null), row(4, null, 4, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER PARTITION LIMIT 2 LIMIT 4", pageSize), row(0, null, 0, null), row(1, null, 1, null), row(2, null, 2, null), row(3, null, 3, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER PARTITION LIMIT 2 LIMIT 3", pageSize), row(0, null, 0, null), row(1, null, 1, null), row(2, null, 2, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (1,3,4) PER PARTITION LIMIT 2", pageSize), row(1, null, 1, null), row(3, null, 3, null), row(4, null, 4, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (1,3,4) PER PARTITION LIMIT 2 LIMIT 2", pageSize), row(1, null, 1, null), row(3, null, 3, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 PER PARTITION LIMIT 3", pageSize), row(2, null, 2, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 ORDER BY b DESC PER PARTITION LIMIT 3", pageSize), row(2, null, 2, null));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 AND s > 0 PER PARTITION LIMIT 2 ALLOW FILTERING", pageSize), row(2, null, 2, null));
    }
    // Phase 2: add regular rows to every partition except a = 1, which keeps
    // only its static row.
    for (int i = 0; i < 5; i++) {
        if (i != 1) {
            for (int j = 0; j < 5; j++) {
                execute("INSERT INTO %s (a, b, s, c) VALUES (?, ?, ?, ?)", i, j, i, j);
            }
        }
    }
    // Non-positive limits are rejected.
    assertInvalidMessage("LIMIT must be strictly positive", "SELECT * FROM %s PER PARTITION LIMIT ?", 0);
    assertInvalidMessage("LIMIT must be strictly positive", "SELECT * FROM %s PER PARTITION LIMIT ?", -1);
    for (int pageSize = 1; pageSize < 8; pageSize++) {
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER PARTITION LIMIT 2", pageSize), row(0, 0, 0, 0), row(0, 1, 0, 1), row(1, null, 1, null), row(2, 0, 2, 0), row(2, 1, 2, 1), row(3, 0, 3, 0), row(3, 1, 3, 1), row(4, 0, 4, 0), row(4, 1, 4, 1));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER PARTITION LIMIT 2 LIMIT 4", pageSize), row(0, 0, 0, 0), row(0, 1, 0, 1), row(1, null, 1, null), row(2, 0, 2, 0));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER PARTITION LIMIT 2 LIMIT 5", pageSize), row(0, 0, 0, 0), row(0, 1, 0, 1), row(1, null, 1, null), row(2, 0, 2, 0), row(2, 1, 2, 1));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (2,3) PER PARTITION LIMIT 2", pageSize), row(2, 0, 2, 0), row(2, 1, 2, 1), row(3, 0, 3, 0), row(3, 1, 3, 1));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (2,3) PER PARTITION LIMIT 2 LIMIT 3", pageSize), row(2, 0, 2, 0), row(2, 1, 2, 1), row(3, 0, 3, 0));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN (1,2,3) PER PARTITION LIMIT 2 LIMIT 3", pageSize), row(1, null, 1, null), row(2, 0, 2, 0), row(2, 1, 2, 1));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 PER PARTITION LIMIT 3", pageSize), row(2, 0, 2, 0), row(2, 1, 2, 1), row(2, 2, 2, 2));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 ORDER BY b DESC PER PARTITION LIMIT 3", pageSize), row(2, 4, 2, 4), row(2, 3, 2, 3), row(2, 2, 2, 2));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 AND b > 0 PER PARTITION LIMIT 2 ALLOW FILTERING", pageSize), row(2, 1, 2, 1), row(2, 2, 2, 2));
        assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 AND b > 2 ORDER BY b DESC PER PARTITION LIMIT 2 ALLOW FILTERING", pageSize), row(2, 4, 2, 4), row(2, 3, 2, 3));
    }
    // PER PARTITION LIMIT is incompatible with DISTINCT and aggregates.
    assertInvalidMessage("PER PARTITION LIMIT is not allowed with SELECT DISTINCT queries", "SELECT DISTINCT a FROM %s PER PARTITION LIMIT ?", 3);
    assertInvalidMessage("PER PARTITION LIMIT is not allowed with SELECT DISTINCT queries", "SELECT DISTINCT a FROM %s PER PARTITION LIMIT ? LIMIT ?", 3, 4);
    assertInvalidMessage("PER PARTITION LIMIT is not allowed with aggregate queries.", "SELECT COUNT(*) FROM %s PER PARTITION LIMIT ?", 3);
}
293096.7410115elasticsearch
/**
 * Stress test for concurrent GeoIP database updates: many ingest threads run
 * two GeoIP processors in a tight loop while one updater thread repeatedly
 * removes, restores and swaps the underlying .mmdb files. Asserts that no
 * ingest run or update fails, that ingestion made progress, and that all
 * lazy loaders end with a reference count of 0 (no leaked readers).
 *
 * @throws Exception on setup or join failure
 */
public void test() throws Exception {
    Path geoIpConfigDir = createTempDir();
    Path geoIpTmpDir = createTempDir();
    ClusterService clusterService = mock(ClusterService.class);
    when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE);
    DatabaseNodeService databaseNodeService = createRegistry(geoIpConfigDir, geoIpTmpDir, clusterService);
    GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService);
    // Both database names start out backed by the same bundled test database.
    Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City.mmdb"));
    Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb"));
    databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City.mmdb"));
    databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb"));
    lazyLoadReaders(databaseNodeService);
    // processor1 uses the default database; processor2 pins the test database.
    final GeoIpProcessor processor1 = (GeoIpProcessor) factory.create(null, "_tag", null, new HashMap<>(Map.of("field", "_field")));
    final GeoIpProcessor processor2 = (GeoIpProcessor) factory.create(null, "_tag", null, new HashMap<>(Map.of("field", "_field", "database_file", "GeoLite2-City-Test.mmdb")));
    // completed is flipped by the updater thread to stop the ingest threads.
    final AtomicBoolean completed = new AtomicBoolean(false);
    final int numberOfDatabaseUpdates = randomIntBetween(2, 4);
    final AtomicInteger numberOfIngestRuns = new AtomicInteger();
    final int numberOfIngestThreads = randomIntBetween(16, 32);
    final Thread[] ingestThreads = new Thread[numberOfIngestThreads];
    final AtomicArray<Throwable> ingestFailures = new AtomicArray<>(numberOfIngestThreads);
    for (int i = 0; i < numberOfIngestThreads; i++) {
        final int id = i;
        // Each ingest thread loops: run both processors on a fresh document
        // and require a geoip result; record the first failure and stop.
        ingestThreads[id] = new Thread(() -> {
            while (completed.get() == false) {
                try {
                    IngestDocument document1 = new IngestDocument("index", "id", 1L, "routing", VersionType.EXTERNAL, Map.of("_field", "89.160.20.128"));
                    processor1.execute(document1);
                    assertThat(document1.getSourceAndMetadata().get("geoip"), notNullValue());
                    IngestDocument document2 = new IngestDocument("index", "id", 1L, "routing", VersionType.EXTERNAL, Map.of("_field", "89.160.20.128"));
                    processor2.execute(document2);
                    assertThat(document2.getSourceAndMetadata().get("geoip"), notNullValue());
                    numberOfIngestRuns.incrementAndGet();
                } catch (Exception | AssertionError e) {
                    logger.error("error in ingest thread after run [" + numberOfIngestRuns.get() + "]", e);
                    ingestFailures.setOnce(id, e);
                    break;
                }
            }
        });
    }
    final AtomicReference<Throwable> failureHolder2 = new AtomicReference<>();
    // The updater thread alternates between removing the default database
    // (waiting until its loader is fully released) and restoring it, while
    // also swapping the test database between two bundled variants.
    Thread updateDatabaseThread = new Thread(() -> {
        for (int i = 0; i < numberOfDatabaseUpdates; i++) {
            try {
                DatabaseReaderLazyLoader previous1 = databaseNodeService.get("GeoLite2-City.mmdb");
                if (Files.exists(geoIpTmpDir.resolve("GeoLite2-City.mmdb"))) {
                    databaseNodeService.removeStaleEntries(List.of("GeoLite2-City.mmdb"));
                    assertBusy(() -> {
                        // -1 signals the loader has been closed and released.
                        assertThat(Files.exists(geoIpTmpDir.resolve("GeoLite2-City.mmdb")), is(false));
                        assertThat(previous1.current(), equalTo(-1));
                    });
                } else {
                    Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City.mmdb"), StandardCopyOption.REPLACE_EXISTING);
                    databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City.mmdb"));
                }
                DatabaseReaderLazyLoader previous2 = databaseNodeService.get("GeoLite2-City-Test.mmdb");
                // Alternate between the GeoIP2 and GeoLite2 test databases.
                InputStream source = ConfigDatabases.class.getResourceAsStream(i % 2 == 0 ? "/GeoIP2-City-Test.mmdb" : "/GeoLite2-City-Test.mmdb");
                Files.copy(source, geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb"), StandardCopyOption.REPLACE_EXISTING);
                databaseNodeService.updateDatabase("GeoLite2-City-Test.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb"));
                DatabaseReaderLazyLoader current1 = databaseNodeService.get("GeoLite2-City.mmdb");
                DatabaseReaderLazyLoader current2 = databaseNodeService.get("GeoLite2-City-Test.mmdb");
                // Each update must install fresh loader instances.
                assertThat(current1, not(sameInstance(previous1)));
                assertThat(current2, not(sameInstance(previous2)));
                lazyLoadReaders(databaseNodeService);
            } catch (Exception | AssertionError e) {
                logger.error("error in update databases thread after run [" + i + "]", e);
                failureHolder2.set(e);
                break;
            }
        }
        completed.set(true);
    });
    Arrays.stream(ingestThreads).forEach(Thread::start);
    updateDatabaseThread.start();
    Arrays.stream(ingestThreads).forEach(thread -> {
        try {
            thread.join();
        } catch (InterruptedException e) {
            throw new AssertionError(e);
        }
    });
    updateDatabaseThread.join();
    // No thread may have recorded a failure, and ingestion must have progressed.
    ingestFailures.asList().forEach(r -> assertThat(r, nullValue()));
    assertThat(failureHolder2.get(), nullValue());
    assertThat(numberOfIngestRuns.get(), greaterThan(0));
    for (DatabaseReaderLazyLoader lazyLoader : databaseNodeService.getAllDatabases()) {
        // 0 means no outstanding references — nothing leaked a reader.
        assertThat(lazyLoader.current(), equalTo(0));
    }
    IOUtils.rm(geoIpConfigDir, geoIpTmpDir);
}
292064.04872elasticsearch
/**
 * Emits the bytecode needed to convert a value of type {@code from} into type
 * {@code to}. Identical types need nothing; two non-boolean primitives use a
 * numeric cast; otherwise a checkcast is emitted unless {@code to} is already
 * assignable from {@code from}.
 *
 * @param from the type currently on the stack
 * @param to   the type required by the consumer
 */
private void writeCast(Class<?> from, Class<?> to) {
    // Same type: nothing to emit.
    if (from.equals(to)) {
        return;
    }
    // boolean has no numeric conversions, so it is excluded on both sides.
    boolean numericPrimitiveCast = from.isPrimitive() && to.isPrimitive()
            && from != boolean.class && to != boolean.class;
    if (numericPrimitiveCast) {
        cast(getType(from), getType(to));
    } else if (to.isAssignableFrom(from) == false) {
        // Reference narrowing requires an explicit checkcast.
        checkCast(getType(to));
    }
}
292064.04872elasticsearch
/**
 * Emits the bytecode needed to convert a value of type {@code from} into type
 * {@code to}. Identical types need nothing; two non-boolean primitives use a
 * numeric cast; otherwise a checkcast is emitted unless {@code to} is already
 * assignable from {@code from}.
 *
 * @param from the type currently on the stack
 * @param to   the type required by the consumer
 */
private void writeCast(Class<?> from, Class<?> to) {
    // Same type: nothing to emit.
    if (from.equals(to)) {
        return;
    }
    // boolean has no numeric conversions, so it is excluded on both sides.
    boolean numericPrimitiveCast = from.isPrimitive() && to.isPrimitive()
            && from != boolean.class && to != boolean.class;
    if (numericPrimitiveCast) {
        cast(getType(from), getType(to));
    } else if (to.isAssignableFrom(from) == false) {
        // Reference narrowing requires an explicit checkcast.
        checkCast(getType(to));
    }
}
292566.681132elasticsearch
/**
 * Verifies the merge rules for updating a join field's relations: removing a
 * parent or child is rejected, promoting an existing root to a child or an
 * existing child to a parent is rejected, while adding new children to an
 * existing parent and adding entirely new parent/child trees are allowed —
 * and the resulting {@code Joiner} reflects the merged relations.
 *
 * @throws Exception on mapping construction failure
 */
public void testUpdateRelations() throws Exception {
    // Initial mapping: parent -> child -> {grand_child1, grand_child2}.
    MapperService mapperService = createMapperService(mapping(b -> {
        b.startObject("join_field");
        {
            b.field("type", "join");
            b.startObject("relations");
            {
                b.field("parent", "child");
                b.array("child", "grand_child1", "grand_child2");
            }
            b.endObject();
        }
        b.endObject();
    }));
    // Dropping the "parent" relation entirely is rejected.
    IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> {
        b.startObject("join_field");
        {
            b.field("type", "join");
            b.startObject("relations");
            {
                b.array("child", "grand_child1", "grand_child2");
            }
            b.endObject();
        }
        b.endObject();
    })));
    assertThat(exc.getMessage(), containsString("Cannot remove parent [parent]"));
    // Dropping an existing child ("grand_child2") is rejected.
    exc = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> {
        b.startObject("join_field");
        {
            b.field("type", "join");
            b.startObject("relations");
            {
                b.field("parent", "child");
                b.array("child", "grand_child1");
            }
            b.endObject();
        }
        b.endObject();
    })));
    assertThat(exc.getMessage(), containsString("Cannot remove child [grand_child2]"));
    // Turning the existing root ("parent") into a child of a new root is rejected.
    exc = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> {
        b.startObject("join_field");
        {
            b.field("type", "join");
            b.startObject("relations");
            {
                b.field("uber_parent", "parent");
                b.field("parent", "child");
                b.array("child", "grand_child1", "grand_child2");
            }
            b.endObject();
        }
        b.endObject();
    })));
    assertThat(exc.getMessage(), containsString("Cannot create child [parent] from an existing root"));
    // Turning an existing leaf child ("grand_child2") into a parent is rejected.
    exc = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, mapping(b -> {
        b.startObject("join_field");
        {
            b.field("type", "join");
            b.startObject("relations");
            {
                b.field("parent", "child");
                b.array("child", "grand_child1", "grand_child2");
                b.field("grand_child2", "grand_grand_child");
            }
            b.endObject();
        }
        b.endObject();
    })));
    assertThat(exc.getMessage(), containsString("Cannot create parent [grand_child2] from an existing child"));
    // Valid merge: add a new child ("child2") under the existing parent.
    merge(mapperService, mapping(b -> {
        b.startObject("join_field");
        {
            b.field("type", "join");
            b.startObject("relations");
            {
                b.array("parent", "child", "child2");
                b.array("child", "grand_child1", "grand_child2");
            }
            b.endObject();
        }
        b.endObject();
    }));
    Joiner joiner = Joiner.getJoiner(mapperService.mappingLookup().getMatchingFieldNames("*").stream().map(mapperService.mappingLookup()::getFieldType));
    assertNotNull(joiner);
    assertEquals("join_field", joiner.getJoinField());
    // child2 is a child (not a parent) sharing the "parent" filter with child.
    assertTrue(joiner.childTypeExists("child2"));
    assertFalse(joiner.parentTypeExists("child2"));
    assertEquals(new TermQuery(new Term("join_field", "parent")), joiner.parentFilter("child"));
    assertEquals(new TermQuery(new Term("join_field", "parent")), joiner.parentFilter("child2"));
    assertTrue(joiner.childTypeExists("grand_child2"));
    assertFalse(joiner.parentTypeExists("grand_child2"));
    assertEquals(new TermQuery(new Term("join_field", "child")), joiner.parentFilter("grand_child1"));
    assertEquals(new TermQuery(new Term("join_field", "child")), joiner.parentFilter("grand_child2"));
    // Valid merge: add an entirely new tree rooted at "other".
    merge(mapperService, mapping(b -> {
        b.startObject("join_field");
        {
            b.field("type", "join");
            b.startObject("relations");
            {
                b.array("parent", "child", "child2");
                b.array("child", "grand_child1", "grand_child2");
                b.array("other", "child_other1", "child_other2");
            }
            b.endObject();
        }
        b.endObject();
    }));
    joiner = Joiner.getJoiner(mapperService.mappingLookup().getMatchingFieldNames("*").stream().map(mapperService.mappingLookup()::getFieldType));
    assertNotNull(joiner);
    assertEquals("join_field", joiner.getJoinField());
    // Pre-existing relations must survive the merge unchanged.
    assertTrue(joiner.childTypeExists("child2"));
    assertFalse(joiner.parentTypeExists("child2"));
    assertEquals(new TermQuery(new Term("join_field", "parent")), joiner.parentFilter("child"));
    assertEquals(new TermQuery(new Term("join_field", "parent")), joiner.parentFilter("child2"));
    assertTrue(joiner.childTypeExists("grand_child2"));
    assertFalse(joiner.parentTypeExists("grand_child2"));
    assertEquals(new TermQuery(new Term("join_field", "child")), joiner.parentFilter("grand_child1"));
    assertEquals(new TermQuery(new Term("join_field", "child")), joiner.parentFilter("grand_child2"));
    // The new tree: "other" is parent-only, its children are child-only.
    assertTrue(joiner.parentTypeExists("other"));
    assertFalse(joiner.childTypeExists("other"));
    assertTrue(joiner.childTypeExists("child_other1"));
    assertFalse(joiner.parentTypeExists("child_other1"));
    assertTrue(joiner.childTypeExists("child_other2"));
    assertFalse(joiner.parentTypeExists("child_other2"));
    assertEquals(new TermQuery(new Term("join_field", "other")), joiner.parentFilter("child_other2"));
}
294209.831118elasticsearch
/**
 * Verifies basic {@link PercolateQuery} matching: registers four stored queries (one tagged
 * "a", three tagged "b"), percolates a single in-memory document against them, and asserts
 * hit counts, matched doc ids, and that {@code explain()} agrees with the returned scores.
 */
public void testPercolateQuery() throws Exception {
    List<Iterable<? extends IndexableField>> docs = new ArrayList<>();
    List<Query> queries = new ArrayList<>();
    // Query store backed by the in-order list above: doc id -> registered query.
    PercolateQuery.QueryStore queryStore = ctx -> queries::get;
    // doc 0, tag "a": simple term query.
    queries.add(new TermQuery(new Term("field", "fox")));
    docs.add(Collections.singleton(new StringField("select", "a", Field.Store.NO)));
    // doc 1, tag "b": span-near query with slop 2.
    SpanNearQuery.Builder snp = new SpanNearQuery.Builder("field", true);
    snp.addClause(new SpanTermQuery(new Term("field", "jumps")));
    snp.addClause(new SpanTermQuery(new Term("field", "lazy")));
    snp.addClause(new SpanTermQuery(new Term("field", "dog")));
    snp.setSlop(2);
    queries.add(snp.build());
    docs.add(Collections.singleton(new StringField("select", "b", Field.Store.NO)));
    // doc 2, tag "b": sloppy phrase query.
    PhraseQuery.Builder pq1 = new PhraseQuery.Builder();
    pq1.add(new Term("field", "quick"));
    pq1.add(new Term("field", "brown"));
    pq1.add(new Term("field", "jumps"));
    pq1.setSlop(1);
    queries.add(pq1.build());
    docs.add(Collections.singleton(new StringField("select", "b", Field.Store.NO)));
    // doc 3, tag "b": conjunction of three term queries.
    BooleanQuery.Builder bq1 = new BooleanQuery.Builder();
    bq1.add(new TermQuery(new Term("field", "quick")), BooleanClause.Occur.MUST);
    bq1.add(new TermQuery(new Term("field", "brown")), BooleanClause.Occur.MUST);
    bq1.add(new TermQuery(new Term("field", "fox")), BooleanClause.Occur.MUST);
    queries.add(bq1.build());
    docs.add(Collections.singleton(new StringField("select", "b", Field.Store.NO)));
    indexWriter.addDocuments(docs);
    indexWriter.close();
    directoryReader = DirectoryReader.open(directory);
    IndexSearcher shardSearcher = newSearcher(directoryReader);
    // The document being percolated; all four stored queries match its text.
    MemoryIndex memoryIndex = new MemoryIndex();
    memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer());
    IndexSearcher percolateSearcher = memoryIndex.createSearcher();
    // Only the "a"-tagged query (doc 0) is a candidate.
    Query query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("a")), new TermQuery(new Term("select", "a")), percolateSearcher, null, new MatchNoDocsQuery("")));
    TopDocs topDocs = shardSearcher.search(query, 10);
    assertThat(topDocs.totalHits.value, equalTo(1L));
    assertThat(topDocs.scoreDocs.length, equalTo(1));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
    Explanation explanation = shardSearcher.explain(query, 0);
    assertThat(explanation.isMatch(), is(true));
    assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));
    // The three "b"-tagged queries (docs 1-3) are candidates; all match.
    query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("b")), new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery("")));
    topDocs = shardSearcher.search(query, 10);
    assertThat(topDocs.totalHits.value, equalTo(3L));
    assertThat(topDocs.scoreDocs.length, equalTo(3));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
    explanation = shardSearcher.explain(query, 1);
    assertThat(explanation.isMatch(), is(true));
    assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
    explanation = shardSearcher.explain(query, 2);
    assertThat(explanation.isMatch(), is(true));
    assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[1].score));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
    // BUGFIX: was explain(query, 2) — a copy-paste of the previous check — but the doc
    // asserted just above is 3, so the explanation must be computed for doc 3.
    explanation = shardSearcher.explain(query, 3);
    assertThat(explanation.isMatch(), is(true));
    assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score));
    // MatchAllDocsQuery candidate filter + verified-matches query: every stored query hits.
    query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("c")), new MatchAllDocsQuery(), percolateSearcher, null, new MatchAllDocsQuery()));
    topDocs = shardSearcher.search(query, 10);
    assertThat(topDocs.totalHits.value, equalTo(4L));
    // Unwrapped (scoring) percolate query: results ordered by score, not doc id.
    query = new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery(""));
    topDocs = shardSearcher.search(query, 10);
    assertThat(topDocs.totalHits.value, equalTo(3L));
    assertThat(topDocs.scoreDocs.length, equalTo(3));
    assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
    explanation = shardSearcher.explain(query, 3);
    assertThat(explanation.isMatch(), is(true));
    assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score));
    assertThat(explanation.getDetails(), arrayWithSize(1));
    assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
    explanation = shardSearcher.explain(query, 2);
    assertThat(explanation.isMatch(), is(true));
    assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[1].score));
    assertThat(explanation.getDetails(), arrayWithSize(1));
    assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
    explanation = shardSearcher.explain(query, 1);
    assertThat(explanation.isMatch(), is(true));
    assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score));
    assertThat(explanation.getDetails(), arrayWithSize(1));
}
294377.367110elasticsearch
/**
 * Exercises the no-master block in a two-node cluster: a lone node must be blocked, a formed
 * two-node cluster must not be; then each node in turn is excluded from voting and stopped,
 * verifying the block appears on the survivor and clears once the cluster re-forms, with the
 * indexed data intact throughout.
 */
public void testTwoNodesNoMasterBlock() throws Exception {
    internalCluster().setBootstrapMasterNodeIndex(1);
    Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build();
    logger.info("--> start first node");
    String node1Name = internalCluster().startNode(settings);
    logger.info("--> should be blocked, no master...");
    ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
    assertThat(state.nodes().getSize(), equalTo(1));
    logger.info("--> start second node, cluster should be formed");
    String node2Name = internalCluster().startNode(settings);
    ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
    assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
    // Check the block is gone both via local state (twice) and via the master's state.
    state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
    state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
    state = clusterAdmin().prepareState().get().getState();
    assertThat(state.nodes().getSize(), equalTo(2));
    assertThat(state.metadata().indices().containsKey("test"), equalTo(false));
    createIndex("test");
    NumShards numShards = getNumShards("test");
    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get();
    }
    assertThat(clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().getActiveShards(), equalTo(numShards.totalNumShards));
    flushAndRefresh();
    logger.info("--> verify we get the data back");
    for (int i = 0; i < 10; i++) {
        assertHitCount(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()), 100);
    }
    String masterNode = internalCluster().getMasterName();
    String otherNode = node1Name.equals(masterNode) ? node2Name : node1Name;
    logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected");
    client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(otherNode)).get();
    logger.info("--> stop master node, no master block should appear");
    // Keep the stopped node's data path so it can be restarted with the same state.
    Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode);
    internalCluster().stopNode(masterNode);
    assertBusy(() -> {
        ClusterState clusterState = clusterAdmin().prepareState().setLocal(true).get().getState();
        assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
    });
    state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
    // The stopped node is still in the node list (not yet removed — no master to publish).
    assertThat(state.nodes().getSize(), equalTo(2));
    assertThat(state.nodes().getMasterNode(), equalTo(null));
    logger.info("--> starting the previous master node again...");
    node2Name = internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build());
    clusterHealthResponse = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes("2").get();
    assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
    state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
    state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
    state = clusterAdmin().prepareState().get().getState();
    assertThat(state.nodes().getSize(), equalTo(2));
    assertThat(state.metadata().indices().containsKey("test"), equalTo(true));
    ensureGreen();
    logger.info("--> verify we get the data back after cluster reform");
    for (int i = 0; i < 10; i++) {
        assertHitCount(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()), 100);
    }
    logger.info("--> clearing voting config exclusions");
    ClearVotingConfigExclusionsRequest clearRequest = new ClearVotingConfigExclusionsRequest();
    clearRequest.setWaitForRemoval(false);
    client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearRequest).get();
    // Second round: this time exclude the current master and stop the OTHER node.
    masterNode = internalCluster().getMasterName();
    otherNode = node1Name.equals(masterNode) ? node2Name : node1Name;
    logger.info("--> add voting config exclusion for master node, to be sure it's not elected");
    client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(masterNode)).get();
    logger.info("--> stop non-master node, no master block should appear");
    Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode);
    internalCluster().stopNode(otherNode);
    assertBusy(() -> {
        ClusterState state1 = clusterAdmin().prepareState().setLocal(true).get().getState();
        assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true));
    });
    logger.info("--> starting the previous master node again...");
    internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build());
    ensureGreen();
    clusterHealthResponse = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().get();
    assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
    state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
    state = clusterAdmin().prepareState().setLocal(true).get().getState();
    assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false));
    state = clusterAdmin().prepareState().get().getState();
    assertThat(state.nodes().getSize(), equalTo(2));
    assertThat(state.metadata().indices().containsKey("test"), equalTo(true));
    logger.info("Running Cluster Health");
    ensureGreen();
    // BUGFIX: log message previously read "--> verify we the data back" (missing "get").
    logger.info("--> verify we get the data back");
    for (int i = 0; i < 10; i++) {
        assertHitCount(prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()), 100);
    }
}
293963.898104elasticsearch
/**
 * End-to-end smoke test of the basic index actions against a single index: index a doc,
 * refresh, check index existence, clear caches, force-merge, get (with and without stored
 * fields), delete, re-index two docs, flush, and finally count via search. Order matters:
 * each step depends on the side effects of the previous ones, and each get/count loop runs
 * five times to exercise repeated reads.
 */
public void testIndexActions() throws Exception {
    createIndex();
    NumShards numShards = getNumShards(getConcreteIndexName());
    logger.info("Running Cluster Health");
    ensureGreen();
    logger.info("Indexing [type1/1]");
    // IMMEDIATE refresh policy makes the doc visible to the existence checks below.
    DocWriteResponse indexResponse = prepareIndex("test").setId("1").setSource(source("1", "test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
    assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName()));
    assertThat(indexResponse.getId(), equalTo("1"));
    logger.info("Refreshing");
    BroadcastResponse refreshResponse = refresh();
    assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    logger.info("--> index exists?");
    assertThat(indexExists(getConcreteIndexName()), equalTo(true));
    logger.info("--> index exists?, fake index");
    assertThat(indexExists("test1234565"), equalTo(false));
    logger.info("Clearing cache");
    BroadcastResponse clearIndicesCacheResponse = indicesAdmin().clearCache(new ClearIndicesCacheRequest("test").fieldDataCache(true).queryCache(true)).actionGet();
    assertNoFailures(clearIndicesCacheResponse);
    assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    logger.info("Force Merging");
    waitForRelocation(ClusterHealthStatus.GREEN);
    BaseBroadcastResponse mergeResponse = forceMerge();
    assertThat(mergeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    GetResponse getResult;
    logger.info("Get [type1/1]");
    // Fetch the doc via both the fluent and the request-object APIs; source must round-trip.
    for (int i = 0; i < 5; i++) {
        getResult = client().prepareGet("test", "1").get();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test"))));
        assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test"));
        getResult = client().get(new GetRequest("test").id("1")).actionGet();
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test"))));
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
    }
    // NOTE(review): log says "with script" but this path actually exercises stored fields;
    // with stored fields requested, _source is not returned (hence the nullValue check).
    logger.info("Get [type1/1] with script");
    for (int i = 0; i < 5; i++) {
        getResult = client().prepareGet("test", "1").setStoredFields("name").get();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat(getResult.isExists(), equalTo(true));
        assertThat(getResult.getSourceAsBytesRef(), nullValue());
        assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test"));
    }
    logger.info("Get [type1/2] (should be empty)");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(new GetRequest("test").id("2")).actionGet();
        assertThat(getResult.isExists(), equalTo(false));
    }
    logger.info("Delete [type1/1]");
    DeleteResponse deleteResponse = client().prepareDelete("test", "1").get();
    assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
    assertThat(deleteResponse.getId(), equalTo("1"));
    logger.info("Refreshing");
    indicesAdmin().refresh(new RefreshRequest("test")).actionGet();
    logger.info("Get [type1/1] (should be empty)");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(new GetRequest("test").id("1")).actionGet();
        assertThat(getResult.isExists(), equalTo(false));
    }
    logger.info("Index [type1/1]");
    client().index(new IndexRequest("test").id("1").source(source("1", "test"))).actionGet();
    logger.info("Index [type1/2]");
    client().index(new IndexRequest("test").id("2").source(source("2", "test2"))).actionGet();
    logger.info("Flushing");
    BroadcastResponse flushResult = indicesAdmin().prepareFlush("test").get();
    assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    assertThat(flushResult.getFailedShards(), equalTo(0));
    logger.info("Refreshing");
    indicesAdmin().refresh(new RefreshRequest("test")).actionGet();
    logger.info("Get [type1/1] and [type1/2]");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(new GetRequest("test").id("1")).actionGet();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(Strings.toString(source("1", "test"))));
        getResult = client().get(new GetRequest("test").id("2")).actionGet();
        String ste1 = getResult.getSourceAsString();
        String ste2 = Strings.toString(source("2", "test2"));
        assertThat("cycle #" + i, ste1, equalTo(ste2));
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
    }
    logger.info("Count");
    // Both docs are visible after the refresh; count via an explicit match_all and a bare search.
    for (int i = 0; i < 5; i++) {
        assertNoFailuresAndResponse(prepareSearch("test").setSize(0).setQuery(matchAllQuery()), countResponse -> {
            assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
            assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
            assertThat(countResponse.getFailedShards(), equalTo(0));
        });
        assertNoFailuresAndResponse(prepareSearch("test").setSize(0), countResponse -> {
            assertThat("Failures " + countResponse.getShardFailures(), countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
            assertThat(countResponse.getHits().getTotalHits().value, equalTo(2L));
            assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
            assertThat(countResponse.getFailedShards(), equalTo(0));
        });
    }
}
293833.666108elasticsearch
/**
 * Verifies shard-level multi-get served from the translog on a single-shard, refresh-disabled
 * index: missing ids yield null responses with the initial segment generation, unrefreshed
 * writes are served from the translog (segmentGeneration == -1), and after a refresh nothing
 * is served from the translog anymore.
 */
public void testShardMultiGetFromTranslog() throws Exception {
    // refresh_interval = -1 so documents stay translog-only until an explicit refresh.
    assertAcked(prepareCreate(INDEX).setSettings(Settings.builder().put("index.refresh_interval", -1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)).addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null))));
    ensureGreen();
    var shardRouting = randomFrom(clusterService().state().routingTable().allShards(INDEX));
    var indicesService = internalCluster().getInstance(IndicesService.class, clusterService().state().nodes().get(shardRouting.currentNodeId()).getName());
    var initialGeneration = indicesService.indexServiceSafe(shardRouting.index()).getShard(shardRouting.id()).getEngineOrNull().getLastCommittedSegmentInfos().getGeneration();
    var indexResponse = prepareIndex(INDEX).setId("0").setSource("field1", "value2").get();
    // NOTE(review): literal "test" here while the rest of the method uses INDEX —
    // presumably INDEX == "test"; confirm and use the constant for consistency.
    client().prepareGet("test", indexResponse.getId()).get();
    // None of these ids exist yet: expect null failures AND null responses for all three.
    var mgetIds = List.of("1", "2", "3");
    var response = getFromTranslog(shardRouting, mgetIds);
    var multiGetShardResponse = response.multiGetShardResponse();
    assertThat(getLocations(multiGetShardResponse).size(), equalTo(3));
    assertThat(getFailures(multiGetShardResponse).size(), equalTo(3));
    assertTrue(getFailures(multiGetShardResponse).stream().allMatch(Objects::isNull));
    assertThat(getResponses(multiGetShardResponse).size(), equalTo(3));
    assertTrue(getResponses(multiGetShardResponse).stream().allMatch(Objects::isNull));
    assertThat(response.segmentGeneration(), equalTo(initialGeneration));
    // Index a random subset of two ids without refreshing; they must be served from translog.
    var bulkRequest = client().prepareBulk();
    var idsToIndex = randomSubsetOf(2, mgetIds);
    for (String id : idsToIndex) {
        bulkRequest.add(new IndexRequest(INDEX).id(id).source("field1", "value1"));
    }
    bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE);
    var bulkResponse = bulkRequest.get();
    assertNoFailures(bulkResponse);
    response = getFromTranslog(shardRouting, mgetIds);
    multiGetShardResponse = response.multiGetShardResponse();
    assertThat(getLocations(multiGetShardResponse).size(), equalTo(3));
    assertThat(getFailures(multiGetShardResponse).size(), equalTo(3));
    assertTrue(getFailures(multiGetShardResponse).stream().allMatch(Objects::isNull));
    var getResponses = getResponses(multiGetShardResponse);
    assertThat(getResponses.size(), equalTo(3));
    for (int location = 0; location < mgetIds.size(); location++) {
        var id = mgetIds.get(location);
        var getResponse = getResponses.get(location);
        if (idsToIndex.contains(id)) {
            assertNotNull(getResponse);
            assertThat(getResponse.getId(), equalTo(id));
            var bulkResponseForId = Arrays.stream(bulkResponse.getItems()).filter(r -> r.getId().equals(id)).toList();
            assertThat(bulkResponseForId.size(), equalTo(1));
            assertThat(getResponse.getVersion(), equalTo(bulkResponseForId.get(0).getVersion()));
        } else {
            assertNull(getResponse);
        }
    }
    assertThat(response.segmentGeneration(), equalTo(initialGeneration));
    // Delete one of the indexed ids: both remaining gets come back non-null, the deleted one
    // with isExists == false, and the response indicates an unsafe generation (-1).
    var idToDelete = randomFrom(idsToIndex);
    client().prepareDelete(INDEX, idToDelete).get();
    response = getFromTranslog(shardRouting, idsToIndex);
    multiGetShardResponse = response.multiGetShardResponse();
    assertThat(getLocations(multiGetShardResponse).size(), equalTo(2));
    assertTrue(getFailures(multiGetShardResponse).stream().allMatch(Objects::isNull));
    getResponses = getResponses(multiGetShardResponse);
    assertThat(getResponses.size(), equalTo(2));
    assertTrue(getResponses.stream().allMatch(Objects::nonNull));
    for (var getResponse : getResponses) {
        // Idiom fix: was `cond ? false : true`; `== false` follows the codebase convention.
        var shouldExist = getResponse.getId().equals(idToDelete) == false;
        assertThat(getResponse.isExists(), equalTo(shouldExist));
    }
    assertThat(response.segmentGeneration(), equalTo(-1L));
    // An auto-generated-id doc is likewise served from the translog before any refresh.
    indexResponse = prepareIndex(INDEX).setSource("field1", "value2").get();
    response = getFromTranslog(shardRouting, List.of(indexResponse.getId()));
    multiGetShardResponse = response.multiGetShardResponse();
    assertThat(getLocations(multiGetShardResponse).size(), equalTo(1));
    assertTrue(getFailures(multiGetShardResponse).stream().allMatch(Objects::isNull));
    getResponses = getResponses(multiGetShardResponse);
    assertThat(getResponses.size(), equalTo(1));
    assertNotNull(getResponses.get(0));
    assertThat(getResponses.get(0).getId(), equalTo(indexResponse.getId()));
    assertThat(getResponses.get(0).getVersion(), equalTo(indexResponse.getVersion()));
    assertThat(response.segmentGeneration(), equalTo(-1L));
    refresh(INDEX);
    response = getFromTranslog(shardRouting, List.of(indexResponse.getId()));
    multiGetShardResponse = response.multiGetShardResponse();
    assertThat(getLocations(multiGetShardResponse).size(), equalTo(1));
    assertTrue(getFailures(multiGetShardResponse).stream().allMatch(Objects::isNull));
    assertTrue("after a refresh we should not be able to get from translog", getResponses(multiGetShardResponse).stream().allMatch(Objects::isNull));
    assertThat(response.segmentGeneration(), equalTo(initialGeneration));
    // More writes and refreshes; a non-existent id must still resolve (to null) safely.
    prepareIndex(INDEX).setSource("field1", "value3").get();
    refresh(INDEX);
    refresh(INDEX);
    prepareIndex(INDEX).setSource("field1", "value4").get();
    response = getFromTranslog(shardRouting, List.of("non-existent"));
    multiGetShardResponse = response.multiGetShardResponse();
    assertThat(getLocations(multiGetShardResponse).size(), equalTo(1));
    assertThat(getFailures(multiGetShardResponse).size(), equalTo(1));
    assertNull(getFailures(multiGetShardResponse).get(0));
    assertThat(getResponses(multiGetShardResponse).size(), equalTo(1));
    assertNull(getResponses(multiGetShardResponse).get(0));
    assertThat(response.segmentGeneration(), equalTo(initialGeneration));
}
291933.73150elasticsearch
/**
 * Sets up the test index with a perfect-match (keyword) and a category (standard) analyzer,
 * seeds six well-known "hero" documents plus a random batch of generated first/last name
 * combinations, then indexes everything with a forced refresh.
 */
public void init() throws Exception {
    CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.analysis.analyzer.perfect_match.type", "custom").put("index.analysis.analyzer.perfect_match.tokenizer", "keyword").put("index.analysis.analyzer.perfect_match.filter", "lowercase").put("index.analysis.analyzer.category.type", "custom").put("index.analysis.analyzer.category.tokenizer", "standard").put("index.analysis.analyzer.category.filter", "lowercase"));
    assertAcked(builder.setMapping(createMapping()));
    ensureGreen();
    // Draw the random doc count first so the random-number sequence matches across runs.
    int numDocs = scaledRandomIntBetween(50, 100);
    List<IndexRequestBuilder> docRequests = new ArrayList<>();
    // Fixed, well-known documents that individual tests query by id.
    docRequests.add(prepareIndex("test").setId("theone").setSource("id", "theone", "full_name", "Captain America", "first_name", "Captain", "last_name", "America", "category", "marvel hero", "skill", 15, "int-field", 25));
    docRequests.add(prepareIndex("test").setId("theother").setSource("id", "theother", "full_name", "marvel hero", "first_name", "marvel", "last_name", "hero", "category", "bogus", "skill", 5));
    docRequests.add(prepareIndex("test").setId("ultimate1").setSource("id", "ultimate1", "full_name", "Alpha the Ultimate Mutant", "first_name", "Alpha the", "last_name", "Ultimate Mutant", "category", "marvel hero", "skill", 1));
    docRequests.add(prepareIndex("test").setId("ultimate2").setSource("full_name", "Man the Ultimate Ninja", "first_name", "Man the Ultimate", "last_name", "Ninja", "category", "marvel hero", "skill", 3));
    docRequests.add(prepareIndex("test").setId("anotherhero").setSource("id", "anotherhero", "full_name", "ultimate", "first_name", "wolferine", "last_name", "", "category", "marvel hero", "skill", 1));
    docRequests.add(prepareIndex("test").setId("nowHero").setSource("id", "nowHero", "full_name", "now sort of", "first_name", "now", "last_name", "", "category", "marvel hero", "skill", 1));
    // Name pools: skew first names toward "Captain"/"Ultimate", last names toward randoms.
    List<String> firstNamePool = new ArrayList<>();
    fill(firstNamePool, "Captain", between(15, 25));
    fill(firstNamePool, "Ultimate", between(5, 10));
    fillRandom(firstNamePool, between(3, 7));
    List<String> lastNamePool = new ArrayList<>();
    fill(lastNamePool, "Captain", between(3, 7));
    fillRandom(lastNamePool, between(30, 40));
    // Generate the filler documents; last name is never identical to the chosen first name.
    for (int docId = 0; docId < numDocs; docId++) {
        String first = RandomPicks.randomFrom(random(), firstNamePool);
        String last = randomPickExcept(lastNamePool, first);
        docRequests.add(prepareIndex("test").setId("" + docId).setSource("id", docId, "full_name", first + " " + last, "first_name", first, "last_name", last, "category", randomBoolean() ? "marvel hero" : "bogus", "skill", between(1, 3)));
    }
    // forceRefresh = true so documents are searchable immediately; no dummy docs.
    indexRandom(true, false, docRequests);
}
293550.375111elasticsearch
/**
 * Exercises the get-snapshots API: missing snapshots (with and without ignoreUnavailable),
 * listing while a snapshot is in progress (via a blocked repository node), and de-duplication
 * when the same snapshots are requested by name and by overlapping wildcards.
 */
public void testGetSnapshotsRequest() throws Exception {
    final String repositoryName = "test-repo";
    final String indexName = "test-idx";
    final Client client = client();
    createRepository(repositoryName, "mock", Settings.builder().put("location", randomRepoPath()).put("compress", false).put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES).put("wait_after_unblock", 200));
    logger.info("--> get snapshots on an empty repository");
    expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot"));
    // With ignoreUnavailable the same request succeeds and returns an empty list.
    GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).setIgnoreUnavailable(true).addSnapshots("non-existent-snapshot").get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
    logger.info("--> creating an index and indexing documents");
    assertAcked(prepareCreate(indexName, 1, Settings.builder().put("number_of_replicas", 0)));
    ensureGreen();
    indexRandomDocs(indexName, 10);
    // Block the repo on a data node so the snapshot stays in progress while we list it.
    final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName);
    client.admin().cluster().prepareCreateSnapshot(repositoryName, "snap-on-empty-repo").setWaitForCompletion(false).setIndices(indexName).get();
    waitForBlock(initialBlockedNode, repositoryName);
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo")).get();
    assertEquals(1, getSnapshotsResponse.getSnapshots().size());
    assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName());
    unblockNode(repositoryName, initialBlockedNode);
    startDeleteSnapshot(repositoryName, "snap-on-empty-repo").get();
    // numSnapshots - 1 completed snapshots, plus one left in progress at the end.
    final int numSnapshots = randomIntBetween(1, 3) + 1;
    logger.info("--> take {} snapshot(s)", numSnapshots - 1);
    final String[] snapshotNames = new String[numSnapshots];
    for (int i = 0; i < numSnapshots - 1; i++) {
        final String snapshotName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName).setWaitForCompletion(true).setIndices(indexName).get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        snapshotNames[i] = snapshotName;
    }
    logger.info("--> take another snapshot to be in-progress");
    for (int i = 10; i < 20; i++) {
        indexDoc(indexName, Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    final String inProgressSnapshot = randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
    snapshotNames[numSnapshots - 1] = inProgressSnapshot;
    final String blockedNode = blockNodeWithIndex(repositoryName, indexName);
    client.admin().cluster().prepareCreateSnapshot(repositoryName, inProgressSnapshot).setWaitForCompletion(false).setIndices(indexName).get();
    waitForBlock(blockedNode, repositoryName);
    logger.info("--> get all snapshots with a current in-progress");
    final List<String> snapshotsToGet = new ArrayList<>();
    if (randomBoolean()) {
        // Request the in-progress snapshot via "_current" plus each completed one by name.
        snapshotsToGet.add("_current");
        for (int i = 0; i < numSnapshots - 1; i++) {
            snapshotsToGet.add(snapshotNames[i]);
        }
    } else {
        snapshotsToGet.add("_all");
    }
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY)).get();
    // NOTE: Arrays.asList returns a view, so this sort also reorders snapshotNames itself;
    // later lookups draw from the (sorted) array, which is fine since they pick at random.
    List<String> sortedNames = Arrays.asList(snapshotNames);
    Collections.sort(sortedNames);
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames));
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).get();
    // (Removed a redundant re-wrap and re-sort of snapshotNames here: the array was already
    // sorted in place by the Collections.sort above, so sortedNames is unchanged.)
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames));
    logger.info("--> make sure duplicates are not returned in the response");
    // Two overlapping wildcards that both match regexName, on top of the explicit names.
    String regexName = snapshotNames[randomIntBetween(0, numSnapshots - 1)];
    final int splitPos = regexName.length() / 2;
    final String firstRegex = regexName.substring(0, splitPos) + "*";
    final String secondRegex = "*" + regexName.substring(splitPos);
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).addSnapshots(firstRegex, secondRegex).get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames));
    unblockNode(repositoryName, blockedNode);
    awaitNoMoreRunningOperations();
}
292875.8215106elasticsearch
/**
 * Computes cluster-wide analysis usage statistics from the given {@link Metadata}.
 * <p>
 * Walks every non-system index and tallies, per analysis component:
 * how many times it is configured ({@code count}) and in how many distinct
 * indices it appears ({@code indexCount}). Built-in component usage is derived
 * from analyzer settings and field mappings; names that an index redefines as
 * custom components are removed from the built-in tallies (order of the
 * {@code removeAll} calls relative to the aggregation below is significant).
 *
 * @param metadata           cluster metadata to scan
 * @param ensureNotCancelled invoked once per index and once per distinct mapping;
 *                           expected to throw if the task was cancelled
 */
public static AnalysisStats of(Metadata metadata, Runnable ensureNotCancelled) {
    // Usage of component *types* (e.g. "pattern", "synonym") declared in index settings.
    final Map<String, IndexFeatureStats> usedCharFilterTypes = new HashMap<>();
    final Map<String, IndexFeatureStats> usedTokenizerTypes = new HashMap<>();
    final Map<String, IndexFeatureStats> usedTokenFilterTypes = new HashMap<>();
    final Map<String, IndexFeatureStats> usedAnalyzerTypes = new HashMap<>();
    // Usage of built-in components referenced *by name* from analyzers and mappings.
    final Map<String, IndexFeatureStats> usedBuiltInCharFilters = new HashMap<>();
    final Map<String, IndexFeatureStats> usedBuiltInTokenizers = new HashMap<>();
    final Map<String, IndexFeatureStats> usedBuiltInTokenFilters = new HashMap<>();
    final Map<String, IndexFeatureStats> usedBuiltInAnalyzers = new HashMap<>();
    final Map<String, SynonymsStats> usedSynonyms = new HashMap<>();
    final Set<String> synonymsIdsUsedInIndices = new HashSet<>();
    final Set<String> synonymsIdsUsed = new HashSet<>();
    // Mappings are deduplicated by hash; count occurrences by identity so each
    // distinct mapping is visited once and weighted by how many indices share it.
    final Map<MappingMetadata, Integer> mappingCounts = new IdentityHashMap<>(metadata.getMappingsByHash().size());
    for (IndexMetadata indexMetadata : metadata) {
        ensureNotCancelled.run();
        if (indexMetadata.isSystem()) {
            // System indices are excluded from usage stats.
            continue;
        }
        // Per-index "seen" sets so indexCount is incremented at most once per index.
        Set<String> indexCharFilters = new HashSet<>();
        Set<String> indexTokenizers = new HashSet<>();
        Set<String> indexTokenFilters = new HashSet<>();
        Set<String> indexAnalyzerTypes = new HashSet<>();
        Set<String> indexCharFilterTypes = new HashSet<>();
        Set<String> indexTokenizerTypes = new HashSet<>();
        Set<String> indexTokenFilterTypes = new HashSet<>();
        Settings indexSettings = indexMetadata.getSettings();
        Map<String, Settings> analyzerSettings = indexSettings.getGroups("index.analysis.analyzer");
        // An analyzer name defined here is custom, not built-in: drop any earlier
        // built-in tally recorded under the same name.
        usedBuiltInAnalyzers.keySet().removeAll(analyzerSettings.keySet());
        for (Settings analyzerSetting : analyzerSettings.values()) {
            // Analyzers without an explicit type are custom by convention.
            final String analyzerType = analyzerSetting.get("type", "custom");
            IndexFeatureStats stats = usedAnalyzerTypes.computeIfAbsent(analyzerType, IndexFeatureStats::new);
            stats.count++;
            if (indexAnalyzerTypes.add(analyzerType)) {
                stats.indexCount++;
            }
            // Components referenced by name from a custom analyzer are counted as
            // built-in here; index-level custom definitions are subtracted below.
            for (String charFilter : analyzerSetting.getAsList("char_filter")) {
                stats = usedBuiltInCharFilters.computeIfAbsent(charFilter, IndexFeatureStats::new);
                stats.count++;
                if (indexCharFilters.add(charFilter)) {
                    stats.indexCount++;
                }
            }
            String tokenizer = analyzerSetting.get("tokenizer");
            if (tokenizer != null) {
                stats = usedBuiltInTokenizers.computeIfAbsent(tokenizer, IndexFeatureStats::new);
                stats.count++;
                if (indexTokenizers.add(tokenizer)) {
                    stats.indexCount++;
                }
            }
            for (String filter : analyzerSetting.getAsList("filter")) {
                stats = usedBuiltInTokenFilters.computeIfAbsent(filter, IndexFeatureStats::new);
                stats.count++;
                if (indexTokenFilters.add(filter)) {
                    stats.indexCount++;
                }
            }
        }
        // Custom component definitions: remove their names from the built-in
        // tallies and aggregate their declared types instead.
        Map<String, Settings> charFilterSettings = indexSettings.getGroups("index.analysis.char_filter");
        usedBuiltInCharFilters.keySet().removeAll(charFilterSettings.keySet());
        aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilterTypes, indexCharFilterTypes);
        Map<String, Settings> tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer");
        usedBuiltInTokenizers.keySet().removeAll(tokenizerSettings.keySet());
        aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizerTypes, indexTokenizerTypes);
        Map<String, Settings> tokenFilterSettings = indexSettings.getGroups("index.analysis.filter");
        usedBuiltInTokenFilters.keySet().removeAll(tokenFilterSettings.keySet());
        aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilterTypes, indexTokenFilterTypes);
        // Synonym usage is folded into usedSynonyms by the helper; the two id sets
        // track distinct synonym sets overall vs. per index (presumably consumed
        // inside SynonymsStats — the helper's internals are not visible here).
        aggregateSynonymsStats(tokenFilterSettings.values(), usedSynonyms, indexMetadata.getIndex().getName(), synonymsIdsUsed, synonymsIdsUsedInIndices);
        countMapping(mappingCounts, indexMetadata);
    }
    // Second pass: scan each distinct mapping once, weighting analyzer references
    // by the number of indices that share the mapping.
    for (Map.Entry<MappingMetadata, Integer> mappingAndCount : mappingCounts.entrySet()) {
        ensureNotCancelled.run();
        Set<String> indexAnalyzers = new HashSet<>();
        final int count = mappingAndCount.getValue();
        MappingVisitor.visitMapping(mappingAndCount.getKey().getSourceAsMap(), (field, fieldMapping) -> {
            for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) {
                Object analyzerO = fieldMapping.get(key);
                if (analyzerO != null) {
                    final String analyzer = analyzerO.toString();
                    IndexFeatureStats stats = usedBuiltInAnalyzers.computeIfAbsent(analyzer, IndexFeatureStats::new);
                    stats.count += count;
                    if (indexAnalyzers.add(analyzer)) {
                        stats.indexCount += count;
                    }
                }
            }
        });
    }
    return new AnalysisStats(usedCharFilterTypes.values(), usedTokenizerTypes.values(), usedTokenFilterTypes.values(), usedAnalyzerTypes.values(), usedBuiltInCharFilters.values(), usedBuiltInTokenizers.values(), usedBuiltInTokenFilters.values(), usedBuiltInAnalyzers.values(), usedSynonyms);
}
292275.318115elasticsearch
 /**
  * Resolves the final settings for an index about to be created by layering, in
  * precedence order (lowest first): provider-supplied defaults, template settings,
  * and request settings — then filling in system defaults (tier preference,
  * created version, shard/replica counts, creation date, UUID).
  * <p>
  * Fix: the debug messages below read "it in set to null"; corrected to
  * "it is set to null". No behavioral change beyond the log text.
  *
  * @param currentState            current cluster state
  * @param request                 the create-index request being processed
  * @param combinedTemplateSettings settings merged from matching templates (may be empty)
  * @param combinedTemplateMappings mappings merged from matching templates
  * @param sourceMetadata          non-null only for resize (shrink/split/clone) operations
  * @param settings                node-level settings supplying defaults
  * @param indexScopedSettings     validator/registry for index-scoped settings
  * @param shardLimitValidator     enforces the cluster shard limit
  * @param indexSettingProviders   plugins contributing additional default settings
  * @return the fully aggregated, validated index settings
  */
 static Settings aggregateIndexSettings(ClusterState currentState, CreateIndexClusterStateUpdateRequest request, Settings combinedTemplateSettings, List<CompressedXContent> combinedTemplateMappings, @Nullable IndexMetadata sourceMetadata, Settings settings, IndexScopedSettings indexScopedSettings, ShardLimitValidator shardLimitValidator, Set<IndexSettingProvider> indexSettingProviders) {
    final boolean isDataStreamIndex = request.dataStreamName() != null;
    final var metadata = currentState.getMetadata();
    final Settings.Builder templateSettings = Settings.builder().put(combinedTemplateSettings);
    final Settings.Builder requestSettings = Settings.builder().put(request.settings());
    final Settings.Builder indexSettingsBuilder = Settings.builder();
    if (sourceMetadata == null) {
        // Fresh index (not a resize): gather provider defaults first so template
        // and request settings can override them.
        final Settings.Builder additionalIndexSettings = Settings.builder();
        final Settings templateAndRequestSettings = Settings.builder().put(combinedTemplateSettings).put(request.settings()).build();
        final boolean timeSeriesTemplate = Optional.of(request).map(CreateIndexClusterStateUpdateRequest::matchingTemplate).map(metadata::isTimeSeriesTemplate).orElse(false);
        final var resolvedAt = Instant.ofEpochMilli(request.getNameResolvedAt());
        for (IndexSettingProvider provider : indexSettingProviders) {
            additionalIndexSettings.put(provider.getAdditionalIndexSettings(request.index(), request.dataStreamName(), timeSeriesTemplate, currentState.getMetadata(), resolvedAt, templateAndRequestSettings, combinedTemplateMappings));
        }
        // An explicit null in the template or request means "unset this default":
        // drop the provider default and the null marker itself.
        for (String explicitSetting : additionalIndexSettings.keys()) {
            if (templateSettings.keys().contains(explicitSetting) && templateSettings.get(explicitSetting) == null) {
                logger.debug("removing default [{}] setting as it is set to null in a template for [{}] creation", explicitSetting, request.index());
                additionalIndexSettings.remove(explicitSetting);
                templateSettings.remove(explicitSetting);
            }
            if (requestSettings.keys().contains(explicitSetting) && requestSettings.get(explicitSetting) == null) {
                logger.debug("removing default [{}] setting as it is set to null in the request for [{}] creation", explicitSetting, request.index());
                additionalIndexSettings.remove(explicitSetting);
                requestSettings.remove(explicitSetting);
            }
        }
        indexSettingsBuilder.put(additionalIndexSettings.build());
        indexSettingsBuilder.put(templateSettings.build());
    }
    // Request settings always win over templates and provider defaults.
    indexSettingsBuilder.put(requestSettings.build());
    if (sourceMetadata == null) {
        // Enforce a tier preference default: hot for data-stream backing indices,
        // content otherwise, unless one was explicitly configured.
        String currentTierPreference = indexSettingsBuilder.get(DataTier.TIER_PREFERENCE);
        if (DataTier.parseTierList(currentTierPreference).isEmpty()) {
            String newTierPreference = isDataStreamIndex ? DataTier.DATA_HOT : DataTier.DATA_CONTENT;
            logger.debug("enforcing default [{}] setting for [{}] creation, replacing [{}] with [{}]", DataTier.TIER_PREFERENCE, request.index(), currentTierPreference, newTierPreference);
            indexSettingsBuilder.put(DataTier.TIER_PREFERENCE, newTierPreference);
        }
    }
    if (indexSettingsBuilder.get(IndexMetadata.SETTING_VERSION_CREATED) == null) {
        // Cap the created version at what the oldest data node can read.
        DiscoveryNodes nodes = currentState.nodes();
        IndexVersion createdVersion = IndexVersion.min(IndexVersion.current(), nodes.getMaxDataNodeCompatibleIndexVersion());
        indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, createdVersion);
    }
    if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(indexSettingsBuilder) == false) {
        indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, INDEX_NUMBER_OF_SHARDS_SETTING.get(settings));
    }
    if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(indexSettingsBuilder) == false) {
        indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, INDEX_NUMBER_OF_REPLICAS_SETTING.get(settings));
    }
    if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {
        indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
    }
    if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) {
        indexSettingsBuilder.put(SETTING_CREATION_DATE, Instant.now().toEpochMilli());
    }
    indexSettingsBuilder.put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
    indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
    if (sourceMetadata != null) {
        // Resize operation: copy/derive settings from the source index instead.
        assert request.resizeType() != null;
        prepareResizeIndexSettings(currentState, indexSettingsBuilder, request.recoverFrom(), request.index(), request.resizeType(), request.copySettings(), indexScopedSettings);
    }
    Settings indexSettings = indexSettingsBuilder.build();
    shardLimitValidator.validateShardLimit(indexSettings, currentState);
    validateSoftDeleteSettings(indexSettings);
    validateTranslogRetentionSettings(indexSettings);
    validateStoreTypeSetting(indexSettings);
    return indexSettings;
}
292233.22697elasticsearch
/**
 * Parses a {@code geo_distance} query from the given XContent stream.
 * <p>
 * Accepts the point as an array, an object ({@code lat}/{@code lon}/{@code geohash}),
 * dotted fields ({@code field.lat}/{@code field.lon}) or a string, alongside the
 * {@code distance}, {@code unit}, {@code distance_type}, validation and common
 * query options.
 * <p>
 * Fix: the unknown-key error inside the point object previously reported the
 * outer field name ({@code currentFieldName}) instead of the offending inner
 * key ({@code currentName}), producing misleading error messages.
 *
 * @param parser positioned at the start of the query body
 * @return the parsed {@link GeoDistanceQueryBuilder}
 * @throws IOException      on stream errors
 * @throws ParsingException on malformed or unsupported content
 */
public static GeoDistanceQueryBuilder fromXContent(XContentParser parser) throws IOException {
    XContentParser.Token token;
    float boost = AbstractQueryBuilder.DEFAULT_BOOST;
    String queryName = null;
    String currentFieldName = null;
    // Start from NaN/NaN so partially specified points are detectable downstream.
    GeoPoint point = new GeoPoint(Double.NaN, Double.NaN);
    String fieldName = null;
    Object vDistance = null;
    DistanceUnit unit = GeoDistanceQueryBuilder.DEFAULT_DISTANCE_UNIT;
    GeoDistance geoDistance = GeoDistanceQueryBuilder.DEFAULT_GEO_DISTANCE;
    GeoValidationMethod validationMethod = null;
    boolean ignoreUnmapped = DEFAULT_IGNORE_UNMAPPED;
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token == XContentParser.Token.START_ARRAY) {
            // Point given as [lon, lat] array under the geo field name.
            fieldName = currentFieldName;
            point = GeoUtils.parseGeoPoint(parser);
        } else if (token == XContentParser.Token.START_OBJECT) {
            // Point given as an object: { "lat": ..., "lon": ... } or { "geohash": ... }.
            throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
            String currentName = parser.currentName();
            fieldName = currentFieldName;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentName = parser.currentName();
                } else if (token.isValue()) {
                    if (currentName.equals("lat")) {
                        point.resetLat(parser.doubleValue());
                    } else if (currentName.equals("lon")) {
                        point.resetLon(parser.doubleValue());
                    } else if (currentName.equals("geohash")) {
                        point.resetFromGeoHash(parser.text());
                    } else {
                        // Report the offending inner key, not the outer field name.
                        throw new ParsingException(parser.getTokenLocation(), "[geo_distance] query does not support [" + currentName + "]");
                    }
                }
            }
        } else if (token.isValue()) {
            if (DISTANCE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                // Distance may be a string ("12km") or a bare number; resolved below.
                if (token == XContentParser.Token.VALUE_STRING) {
                    vDistance = parser.text();
                } else {
                    vDistance = parser.numberValue();
                }
            } else if (UNIT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                unit = DistanceUnit.fromString(parser.text());
            } else if (DISTANCE_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                geoDistance = GeoDistance.fromString(parser.text());
            } else if (currentFieldName.endsWith(".lat")) {
                // Dotted form: "<field>.lat" / "<field>.lon".
                point.resetLat(parser.doubleValue());
                fieldName = currentFieldName.substring(0, currentFieldName.length() - ".lat".length());
            } else if (currentFieldName.endsWith(".lon")) {
                point.resetLon(parser.doubleValue());
                fieldName = currentFieldName.substring(0, currentFieldName.length() - ".lon".length());
            } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                queryName = parser.text();
            } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                boost = parser.floatValue();
            } else if (IGNORE_UNMAPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                ignoreUnmapped = parser.booleanValue();
            } else if (VALIDATION_METHOD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                validationMethod = GeoValidationMethod.fromString(parser.text());
            } else {
                // Any other scalar field is treated as the geo field holding a
                // string-encoded point — but only one geo field is allowed.
                if (fieldName == null) {
                    point.resetFromString(parser.text());
                    fieldName = currentFieldName;
                } else {
                    throw new ParsingException(parser.getTokenLocation(), "failed to parse [{}] query. unexpected field [{}]", NAME, currentFieldName);
                }
            }
        }
    }
    if (vDistance == null) {
        throw new ParsingException(parser.getTokenLocation(), "geo_distance requires 'distance' to be specified");
    }
    GeoDistanceQueryBuilder qb = new GeoDistanceQueryBuilder(fieldName);
    if (vDistance instanceof Number) {
        qb.distance(((Number) vDistance).doubleValue(), unit);
    } else {
        qb.distance((String) vDistance, unit);
    }
    qb.point(point);
    if (validationMethod != null) {
        qb.setValidationMethod(validationMethod);
    }
    qb.geoDistance(geoDistance);
    qb.boost(boost);
    qb.queryName(queryName);
    qb.ignoreUnmapped(ignoreUnmapped);
    return qb;
}
292760.492691elasticsearch
/**
 * Builds the {@code _cat/thread_pool} table: one row per (node, thread pool)
 * pair, restricted to pools whose names match the {@code thread_pool_patterns}
 * request parameter (default: all pools).
 *
 * @param req        the incoming REST request (supplies pattern filtering and headers)
 * @param state      cluster state response, used for the node list
 * @param nodesInfo  per-node thread pool configuration
 * @param nodesStats per-node thread pool runtime statistics
 * @return the populated table
 */
private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats) {
    final String[] patterns = req.paramAsStringArray("thread_pool_patterns", new String[] { "*" });
    final DiscoveryNodes discoveryNodes = state.getState().nodes();
    final Table table = getTableWithHeader(req);

    // Gather every pool name reported by any node, then keep only those
    // matching the requested patterns.
    final Set<String> selectedPools = new HashSet<>();
    for (final NodeStats perNodeStats : nodesStats.getNodes()) {
        for (final ThreadPoolStats.Stats poolStat : perNodeStats.getThreadPool()) {
            selectedPools.add(poolStat.name());
        }
    }
    selectedPools.removeIf(name -> Regex.simpleMatch(patterns, name) == false);

    for (final DiscoveryNode node : discoveryNodes) {
        final NodeInfo info = nodesInfo.getNodesMap().get(node.getId());
        final NodeStats stats = nodesStats.getNodesMap().get(node.getId());

        // Index this node's stats/config by pool name; TreeMap keeps the rows
        // in pool-name order. A node without stats contributes no rows.
        final Map<String, ThreadPoolStats.Stats> statsByPool;
        final Map<String, ThreadPool.Info> infoByPool;
        if (stats == null) {
            statsByPool = Collections.emptyMap();
            infoByPool = Collections.emptyMap();
        } else {
            statsByPool = new TreeMap<>();
            for (ThreadPoolStats.Stats poolStat : stats.getThreadPool()) {
                statsByPool.put(poolStat.name(), poolStat);
            }
            infoByPool = new HashMap<>();
            if (info != null) {
                for (ThreadPool.Info poolConfig : info.getInfo(ThreadPoolInfo.class)) {
                    infoByPool.put(poolConfig.getName(), poolConfig);
                }
            }
        }

        for (final Map.Entry<String, ThreadPoolStats.Stats> entry : statsByPool.entrySet()) {
            final String poolName = entry.getKey();
            if (selectedPools.contains(poolName) == false) {
                continue;
            }

            final ThreadPoolStats.Stats poolStats = entry.getValue();
            final ThreadPool.Info poolInfo = infoByPool.get(poolName);

            // Derive sizing columns from the pool's configuration when present:
            // SCALING pools report core/max, fixed pools report a single size.
            Long maxQueueSize = null;
            TimeValue keepAlive = null;
            Integer core = null;
            Integer max = null;
            Integer size = null;
            if (poolInfo != null) {
                if (poolInfo.getQueueSize() != null) {
                    maxQueueSize = poolInfo.getQueueSize().singles();
                }
                if (poolInfo.getKeepAlive() != null) {
                    keepAlive = poolInfo.getKeepAlive();
                }
                if (poolInfo.getThreadPoolType() == ThreadPool.ThreadPoolType.SCALING) {
                    assert poolInfo.getMin() >= 0;
                    core = poolInfo.getMin();
                    assert poolInfo.getMax() > 0;
                    max = poolInfo.getMax();
                } else {
                    assert poolInfo.getMin() == poolInfo.getMax() && poolInfo.getMax() > 0;
                    size = poolInfo.getMax();
                }
            }

            table.startRow();
            table.addCell(node.getName());
            table.addCell(node.getId());
            table.addCell(node.getEphemeralId());
            table.addCell(info == null ? null : info.getInfo(ProcessInfo.class).getId());
            table.addCell(node.getHostName());
            table.addCell(node.getHostAddress());
            table.addCell(node.getAddress().address().getPort());
            table.addCell(poolName);
            table.addCell(poolInfo == null ? null : poolInfo.getThreadPoolType().getType());
            table.addCell(poolStats == null ? null : poolStats.active());
            table.addCell(poolStats == null ? null : poolStats.threads());
            table.addCell(poolStats == null ? null : poolStats.queue());
            table.addCell(maxQueueSize == null ? -1 : maxQueueSize);
            table.addCell(poolStats == null ? null : poolStats.rejected());
            table.addCell(poolStats == null ? null : poolStats.largest());
            table.addCell(poolStats == null ? null : poolStats.completed());
            table.addCell(core);
            table.addCell(max);
            table.addCell(size);
            table.addCell(keepAlive);
            table.endRow();
        }
    }
    return table;
}
292008.411124elasticsearch
/**
 * Connects to the given node, deduplicating concurrent attempts and retrying a
 * bounded number of times if the freshly established connection is torn down
 * before this caller can acquire a reference to it.
 * <p>
 * The listener receives a {@link Releasable} that releases the caller's ref on
 * the connection. Failures are reported via the listener, never thrown from the
 * async path. NOTE(review): the statement ordering here (ref-count acquire,
 * pending-map insert, connected-map recheck) is race-sensitive — do not reorder.
 *
 * @param node                 target node (null is rejected via the listener)
 * @param connectionProfile    optional profile, merged over the default profile
 * @param connectionValidator  validates a newly opened raw connection (e.g. handshake)
 * @param previousFailureCount how many concurrent connect/disconnect races this
 *                             attempt has already hit (0 on first call)
 * @param listener             completed with a ref-releasing Releasable on success
 */
private void connectToNodeOrRetry(DiscoveryNode node, @Nullable ConnectionProfile connectionProfile, ConnectionValidator connectionValidator, int previousFailureCount, ActionListener<Releasable> listener) throws ConnectTransportException {
    ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile);
    if (node == null) {
        listener.onFailure(new ConnectTransportException(null, "can't connect to a null node"));
        return;
    }
    // Take a ref on the manager while connecting; fails if the manager is closed.
    if (acquireConnectingRef() == false) {
        listener.onFailure(new ConnectTransportException(node, "connection manager is closed"));
        return;
    }
    // Wraps the caller's listener: tries to pin the delivered connection with a
    // ref; if it was already fully released (concurrent disconnect), retries the
    // whole connect once the stale connection is removed — up to 10 attempts.
    final ActionListener<Transport.Connection> acquiringListener = listener.delegateFailure((delegate, connection) -> {
        if (connection.tryIncRef()) {
            // Hand out a once-only releaser for the ref we just took.
            delegate.onResponse(Releasables.releaseOnce(connection::decRef));
            return;
        }
        final int failureCount = previousFailureCount + 1;
        if (failureCount < 10) {
            logger.trace("concurrent connect/disconnect for [{}] ([{}] failures), will try again", node, failureCount);
            connection.addRemovedListener(delegate.delegateFailure((retryDelegate, ignored) -> connectToNodeOrRetry(node, connectionProfile, connectionValidator, failureCount, retryDelegate)));
        } else {
            logger.warn("failed to connect to [{}] after [{}] attempts, giving up", node.descriptionWithoutAttributes(), failureCount);
            delegate.onFailure(new ConnectTransportException(node, "concurrently connecting and disconnecting even after [" + failureCount + "] attempts"));
        }
    });
    // Fast path: already connected.
    final Transport.Connection existingConnection = connectedNodes.get(node);
    if (existingConnection != null) {
        connectingRefCounter.decRef();
        acquiringListener.onResponse(existingConnection);
        return;
    }
    // Deduplicate in-flight attempts: only the thread that wins putIfAbsent
    // actually opens a connection; everyone else piggybacks on its future.
    final ListenableFuture<Transport.Connection> currentListener = new ListenableFuture<>();
    final ListenableFuture<Transport.Connection> existingListener = pendingConnections.putIfAbsent(node, currentListener);
    if (existingListener != null) {
        try {
            existingListener.addListener(acquiringListener);
        } finally {
            connectingRefCounter.decRef();
        }
        return;
    }
    currentListener.addListener(acquiringListener);
    // Recheck after publishing our pending future: a connect may have completed
    // between the first connectedNodes lookup and the putIfAbsent above.
    final Transport.Connection existingConnectionRecheck = connectedNodes.get(node);
    if (existingConnectionRecheck != null) {
        ListenableFuture<Transport.Connection> future = pendingConnections.remove(node);
        assert future == currentListener : "Listener in pending map is different than the expected listener";
        connectingRefCounter.decRef();
        future.onResponse(existingConnectionRecheck);
        return;
    }
    // Ensure the connecting ref is released exactly once across all async paths.
    final RunOnce releaseOnce = new RunOnce(connectingRefCounter::decRef);
    internalOpenConnection(node, resolvedProfile, ActionListener.wrap(conn -> connectionValidator.validate(conn, resolvedProfile, ActionListener.runAfter(ActionListener.wrap(ignored -> {
        assert Transports.assertNotTransportThread("connection validator success");
        // managerRefs tracks the manager's own interest in the connection;
        // onRemoved fires once both the manager and close-listeners are done.
        final var managerRefs = AbstractRefCounted.of(conn::onRemoved);
        try {
            if (connectedNodes.putIfAbsent(node, conn) != null) {
                // Should be impossible: pendingConnections serializes attempts.
                assert false : "redundant connection to " + node;
                logger.warn("existing connection to node [{}], closing new redundant connection", node);
                IOUtils.closeWhileHandlingException(conn);
            } else {
                logger.debug("connected to node [{}]", node);
                managerRefs.mustIncRef();
                try {
                    connectionListener.onNodeConnected(node, conn);
                } finally {
                    // On close: unregister, notify, and drop the manager's ref.
                    conn.addCloseListener(ActionListener.running(() -> {
                        connectedNodes.remove(node, conn);
                        connectionListener.onNodeDisconnected(node, conn);
                        managerRefs.decRef();
                    }));
                    // Second close listener only logs the reason for the close.
                    conn.addCloseListener(ActionListener.running(() -> {
                        if (connectingRefCounter.hasReferences() == false) {
                            logger.trace("connection manager shut down, closing transport connection to [{}]", node);
                        } else if (conn.hasReferences()) {
                            logger.info("transport connection to [{}] closed by remote", node.descriptionWithoutAttributes());
                        } else {
                            logger.debug("closing unused transport connection to [{}]", node);
                        }
                    }));
                }
            }
        } finally {
            // Publish the result to all waiters and release bookkeeping refs.
            ListenableFuture<Transport.Connection> future = pendingConnections.remove(node);
            assert future == currentListener : "Listener in pending map is different than the expected listener";
            managerRefs.decRef();
            releaseOnce.run();
            future.onResponse(conn);
        }
    }, e -> {
        assert Transports.assertNotTransportThread("connection validator failure");
        IOUtils.closeWhileHandlingException(conn);
        failConnectionListener(node, releaseOnce, e, currentListener);
    }), conn::decRef)), e -> {
        assert Transports.assertNotTransportThread("internalOpenConnection failure");
        failConnectionListener(node, releaseOnce, e, currentListener);
    }));
}
291669.5214133elasticsearch
/**
 * Produces a copy of {@code instance} with exactly one randomly chosen field
 * mutated, for equals/hashCode/serialization round-trip testing.
 * <p>
 * Fix: the switch covers nine cases (0–8) but the bound was {@code nextInt(8)},
 * which yields 0–7 only — case 8 (flipping the trailing boolean) was dead code
 * and that mutation was never exercised. The bound is now 9 and the default
 * message corrected accordingly.
 *
 * @param instance the request to mutate
 * @return a request differing from {@code instance} in one field
 */
protected FieldCapabilitiesNodeRequest mutateInstance(FieldCapabilitiesNodeRequest instance) {
    // nextInt(9) produces 0..8 inclusive, covering every case below.
    switch(random().nextInt(9)) {
        case 0 ->
            {
                // Mutate the shard id list (one extra shard).
                List<ShardId> shardIds = randomShardIds(instance.shardIds().size() + 1);
                return new FieldCapabilitiesNodeRequest(shardIds, instance.fields(), instance.filters(), instance.allowedTypes(), instance.originalIndices(), instance.indexFilter(), instance.nowInMillis(), instance.runtimeFields(), true);
            }
        case 1 ->
            {
                // Mutate the requested field patterns.
                String[] fields = randomFields(instance.fields().length + 2);
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), fields, instance.filters(), instance.allowedTypes(), instance.originalIndices(), instance.indexFilter(), instance.nowInMillis(), instance.runtimeFields(), true);
            }
        case 2 ->
            {
                // Mutate the original indices.
                OriginalIndices originalIndices = randomOriginalIndices(instance.indices().length + 1);
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), instance.fields(), instance.filters(), instance.allowedTypes(), originalIndices, instance.indexFilter(), instance.nowInMillis(), instance.runtimeFields(), true);
            }
        case 3 ->
            {
                // Toggle the index filter between null and match_all.
                QueryBuilder indexFilter = instance.indexFilter() == null ? QueryBuilders.matchAllQuery() : null;
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), instance.fields(), instance.filters(), instance.allowedTypes(), instance.originalIndices(), indexFilter, instance.nowInMillis(), instance.runtimeFields(), true);
            }
        case 4 ->
            {
                // Shift the request timestamp.
                long nowInMillis = instance.nowInMillis() + 100;
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), instance.fields(), instance.filters(), instance.allowedTypes(), instance.originalIndices(), instance.indexFilter(), nowInMillis, instance.runtimeFields(), true);
            }
        case 5 ->
            {
                // Toggle the runtime fields between null and a singleton map.
                Map<String, Object> runtimeFields = instance.runtimeFields() == null ? Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)) : null;
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), instance.fields(), instance.filters(), instance.allowedTypes(), instance.originalIndices(), instance.indexFilter(), instance.nowInMillis(), runtimeFields, true);
            }
        case 6 ->
            {
                // Toggle the filters between empty and {"-nested"}.
                String[] randomFilter = instance.filters().length > 0 ? Strings.EMPTY_ARRAY : new String[] { "-nested" };
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), instance.fields(), randomFilter, instance.allowedTypes(), instance.originalIndices(), instance.indexFilter(), instance.nowInMillis(), instance.runtimeFields(), true);
            }
        case 7 ->
            {
                // Toggle the allowed types between empty and {"text"}.
                String[] randomType = instance.allowedTypes().length > 0 ? Strings.EMPTY_ARRAY : new String[] { "text" };
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), instance.fields(), instance.filters(), randomType, instance.originalIndices(), instance.indexFilter(), instance.nowInMillis(), instance.runtimeFields(), true);
            }
        case 8 ->
            {
                // Flip the trailing boolean flag (previously unreachable).
                return new FieldCapabilitiesNodeRequest(instance.shardIds(), instance.fields(), instance.filters(), instance.allowedTypes(), instance.originalIndices(), instance.indexFilter(), instance.nowInMillis(), instance.runtimeFields(), false);
            }
        default ->
            throw new IllegalStateException("The test should only allow 9 parameters mutated");
    }
}
293471.951894elasticsearch
/**
 * Verifies that {@link SimulatePipelineRequest#parse} normalizes document
 * metadata (version, if_seq_no, if_primary_term coerced to long; other fields
 * as provided) and builds the ad-hoc pipeline from the supplied config.
 * <p>
 * Fix: {@code long longValue = (long) value;} cast an {@code Object} to
 * {@code long}, which the compiler implements as {@code (Long)} + unbox and
 * therefore throws {@link ClassCastException} whenever the {@code randomInt()}
 * branch produced an {@code Integer}. Both occurrences now use
 * {@code ((Number) value).longValue()}, which works for Long and Integer alike.
 */
public void testParseWithProvidedPipeline() throws Exception {
    int numDocs = randomIntBetween(1, 10);
    Map<String, Object> requestContent = new HashMap<>();
    List<Map<String, Object>> docs = new ArrayList<>();
    List<Map<String, Object>> expectedDocs = new ArrayList<>();
    requestContent.put(Fields.DOCS, docs);
    for (int i = 0; i < numDocs; i++) {
        Map<String, Object> doc = new HashMap<>();
        Map<String, Object> expectedDoc = new HashMap<>();
        List<Metadata> fields = Arrays.asList(INDEX, ID, ROUTING, VERSION, VERSION_TYPE, IF_SEQ_NO, IF_PRIMARY_TERM);
        for (Metadata field : fields) {
            if (field == VERSION) {
                // Feed either a Long or an Integer (optionally stringified);
                // the parser is expected to normalize all of them to long.
                Object value = randomBoolean() ? randomLong() : randomInt();
                doc.put(field.getFieldName(), randomBoolean() ? value : value.toString());
                long longValue = ((Number) value).longValue();
                expectedDoc.put(field.getFieldName(), longValue);
            } else if (field == VERSION_TYPE) {
                String value = VersionType.toString(randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE));
                doc.put(field.getFieldName(), value);
                expectedDoc.put(field.getFieldName(), value);
            } else if (field == IF_SEQ_NO || field == IF_PRIMARY_TERM) {
                // Same Long-or-Integer normalization as VERSION.
                Object value = randomBoolean() ? randomNonNegativeLong() : randomInt(1000);
                doc.put(field.getFieldName(), randomBoolean() ? value : value.toString());
                long longValue = ((Number) value).longValue();
                expectedDoc.put(field.getFieldName(), longValue);
            } else {
                // Remaining metadata fields pass through; integers are expected
                // back as their string form.
                if (randomBoolean()) {
                    String value = randomAlphaOfLengthBetween(1, 10);
                    doc.put(field.getFieldName(), value);
                    expectedDoc.put(field.getFieldName(), value);
                } else {
                    Integer value = randomIntBetween(1, 1000000);
                    doc.put(field.getFieldName(), value);
                    expectedDoc.put(field.getFieldName(), String.valueOf(value));
                }
            }
        }
        String fieldName = randomAlphaOfLengthBetween(1, 10);
        String fieldValue = randomAlphaOfLengthBetween(1, 10);
        doc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
        docs.add(doc);
        expectedDoc.put(Fields.SOURCE, Collections.singletonMap(fieldName, fieldValue));
        expectedDocs.add(expectedDoc);
    }
    // Build a random pipeline config: processors each optionally carrying an
    // on_failure chain, plus an optional top-level on_failure chain.
    Map<String, Object> pipelineConfig = new HashMap<>();
    List<Map<String, Object>> processors = new ArrayList<>();
    int numProcessors = randomIntBetween(1, 10);
    for (int i = 0; i < numProcessors; i++) {
        Map<String, Object> processorConfig = new HashMap<>();
        List<Map<String, Object>> onFailureProcessors = new ArrayList<>();
        int numOnFailureProcessors = randomIntBetween(0, 1);
        for (int j = 0; j < numOnFailureProcessors; j++) {
            onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap()));
        }
        if (numOnFailureProcessors > 0) {
            processorConfig.put("on_failure", onFailureProcessors);
        }
        processors.add(Collections.singletonMap("mock_processor", processorConfig));
    }
    pipelineConfig.put("processors", processors);
    List<Map<String, Object>> onFailureProcessors = new ArrayList<>();
    int numOnFailureProcessors = randomIntBetween(0, 1);
    for (int i = 0; i < numOnFailureProcessors; i++) {
        onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap()));
    }
    if (numOnFailureProcessors > 0) {
        pipelineConfig.put("on_failure", onFailureProcessors);
    }
    requestContent.put(Fields.PIPELINE, pipelineConfig);
    SimulatePipelineRequest.Parsed actualRequest = SimulatePipelineRequest.parse(requestContent, false, ingestService, RestApiVersion.current());
    assertThat(actualRequest.verbose(), equalTo(false));
    assertThat(actualRequest.documents().size(), equalTo(numDocs));
    // Each parsed IngestDocument must carry the normalized metadata and source.
    Iterator<Map<String, Object>> expectedDocsIterator = expectedDocs.iterator();
    for (IngestDocument ingestDocument : actualRequest.documents()) {
        Map<String, Object> expectedDocument = expectedDocsIterator.next();
        org.elasticsearch.script.Metadata metadata = ingestDocument.getMetadata();
        assertThat(metadata.get(INDEX.getFieldName()), equalTo(expectedDocument.get(INDEX.getFieldName())));
        assertThat(metadata.get(ID.getFieldName()), equalTo(expectedDocument.get(ID.getFieldName())));
        assertThat(metadata.get(ROUTING.getFieldName()), equalTo(expectedDocument.get(ROUTING.getFieldName())));
        assertThat(metadata.get(VERSION.getFieldName()), equalTo(expectedDocument.get(VERSION.getFieldName())));
        assertThat(metadata.get(VERSION_TYPE.getFieldName()), equalTo(expectedDocument.get(VERSION_TYPE.getFieldName())));
        assertThat(metadata.get(IF_SEQ_NO.getFieldName()), equalTo(expectedDocument.get(IF_SEQ_NO.getFieldName())));
        assertThat(metadata.get(IF_PRIMARY_TERM.getFieldName()), equalTo(expectedDocument.get(IF_PRIMARY_TERM.getFieldName())));
        assertThat(ingestDocument.getSource(), equalTo(expectedDocument.get(Fields.SOURCE)));
    }
    assertThat(actualRequest.pipeline().getId(), equalTo(SIMULATED_PIPELINE_ID));
    assertThat(actualRequest.pipeline().getDescription(), nullValue());
    assertThat(actualRequest.pipeline().getProcessors().size(), equalTo(numProcessors));
}
292275.411142elasticsearch
// Exercises WildcardExpressionResolver against a mix of concrete indices and aliases under
// three flavours of IndicesOptions: aliases resolvable, aliases skipped leniently, and
// aliases skipped with strict error reporting.
public void testResolveAliases() {
    // Two stand-alone indices plus two indices that share the alias "foo_alias".
    Metadata.Builder metadataBuilder = Metadata.builder()
        .put(indexBuilder("foo_foo").state(State.OPEN))
        .put(indexBuilder("bar_bar").state(State.OPEN))
        .put(indexBuilder("foo_index").state(State.OPEN).putAlias(AliasMetadata.builder("foo_alias")))
        .put(indexBuilder("bar_index").state(State.OPEN).putAlias(AliasMetadata.builder("foo_alias")));
    ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadataBuilder).build();
    // Aliases are allowed to participate in wildcard resolution.
    IndicesOptions withAliasesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false, true, false, false, false);
    IndexNameExpressionResolver.Context withAliasesContext = new IndexNameExpressionResolver.Context(clusterState, withAliasesOptions, SystemIndexAccessLevel.NONE);
    // Aliases are skipped; unresolved expressions are tolerated.
    IndicesOptions lenientSkipAliasesOptions = IndicesOptions.fromOptions(true, true, true, false, true, false, true, false);
    IndexNameExpressionResolver.Context lenientSkipAliasesContext = new IndexNameExpressionResolver.Context(clusterState, lenientSkipAliasesOptions, SystemIndexAccessLevel.NONE);
    // Aliases are skipped; unresolved expressions raise errors.
    IndicesOptions strictSkipAliasesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true, false);
    IndexNameExpressionResolver.Context strictSkipAliasesContext = new IndexNameExpressionResolver.Context(clusterState, strictSkipAliasesOptions, SystemIndexAccessLevel.NONE);
    {
        // "foo_a*" only matches the alias, which resolves to both indices carrying it.
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(withAliasesContext, List.of("foo_a*"));
        assertThat(resolved, containsInAnyOrder("foo_index", "bar_index"));
    }
    {
        // With aliases skipped leniently, the alias-only match disappears silently.
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(lenientSkipAliasesContext, List.of("foo_a*"));
        assertEquals(0, resolved.size());
    }
    {
        // With aliases skipped strictly, the unmatched expression is reported as missing.
        IndexNotFoundException notFound = expectThrows(IndexNotFoundException.class, () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve(strictSkipAliasesContext, List.of("foo_a*")));
        assertEquals("foo_a*", notFound.getIndex().getName());
    }
    {
        // "foo*" matches a concrete index plus the alias (expanded to its indices).
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(withAliasesContext, List.of("foo*"));
        assertThat(resolved, containsInAnyOrder("foo_foo", "foo_index", "bar_index"));
    }
    {
        // Skipping aliases keeps only the concretely-matching indices.
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(lenientSkipAliasesContext, List.of("foo*"));
        assertThat(resolved, containsInAnyOrder("foo_foo", "foo_index"));
    }
    {
        // Strict mode is no different here because the wildcard still matches concrete indices.
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(strictSkipAliasesContext, List.of("foo*"));
        assertThat(resolved, containsInAnyOrder("foo_foo", "foo_index"));
    }
    {
        // A literal (non-wildcard) alias name passes through untouched.
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(withAliasesContext, List.of("foo_alias"));
        assertThat(resolved, containsInAnyOrder("foo_alias"));
    }
    {
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(lenientSkipAliasesContext, List.of("foo_alias"));
        assertThat(resolved, containsInAnyOrder("foo_alias"));
    }
    {
        // Full expression resolution rejects a literal alias when aliases are disallowed.
        IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> IndexNameExpressionResolver.resolveExpressions(strictSkipAliasesContext, "foo_alias"));
        assertEquals("The provided expression [foo_alias] matches an alias, specify the corresponding concrete indices instead.", error.getMessage());
    }
    // Without wildcard expansion, a literal alias is also returned as-is (lenient case) …
    IndicesOptions noExpandNoAliasesOptions = IndicesOptions.fromOptions(true, false, false, false, true, false, true, false);
    IndexNameExpressionResolver.Context noExpandNoAliasesContext = new IndexNameExpressionResolver.Context(clusterState, noExpandNoAliasesOptions, SystemIndexAccessLevel.NONE);
    {
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(noExpandNoAliasesContext, List.of("foo_alias"));
        assertThat(resolved, containsInAnyOrder("foo_alias"));
    }
    // … but rejected by full resolution in the strict case.
    IndicesOptions strictNoExpandNoAliasesOptions = IndicesOptions.fromOptions(false, true, false, false, true, false, true, false);
    IndexNameExpressionResolver.Context strictNoExpandNoAliasesContext = new IndexNameExpressionResolver.Context(clusterState, strictNoExpandNoAliasesOptions, SystemIndexAccessLevel.NONE);
    {
        IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> IndexNameExpressionResolver.resolveExpressions(strictNoExpandNoAliasesContext, "foo_alias"));
        assertEquals("The provided expression [foo_alias] matches an alias, specify the corresponding concrete indices instead.", error.getMessage());
    }
}
292090.911150elasticsearch
// Checks wildcard resolution when the cluster also contains a data stream: backing indices are
// returned only when the resolver context is configured to include data streams, and the same
// holds with hidden-index expansion enabled.
public void testResolveDataStreams() {
    String dataStreamName = "foo_logs";
    long epochMillis = randomLongBetween(1580536800000L, 1583042400000L);
    IndexMetadata backingIndex1 = createBackingIndex(dataStreamName, 1, epochMillis).build();
    IndexMetadata backingIndex2 = createBackingIndex(dataStreamName, 2, epochMillis).build();
    // Hoist the generated backing-index names so the assertions below stay readable.
    String backingIndex1Name = DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis);
    String backingIndex2Name = DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis);
    Metadata.Builder metadataBuilder = Metadata.builder()
        .put(indexBuilder("foo_foo").state(State.OPEN))
        .put(indexBuilder("bar_bar").state(State.OPEN))
        .put(indexBuilder("foo_index").state(State.OPEN).putAlias(AliasMetadata.builder("foo_alias")))
        .put(indexBuilder("bar_index").state(State.OPEN).putAlias(AliasMetadata.builder("foo_alias")))
        .put(backingIndex1, true)
        .put(backingIndex2, true)
        .put(DataStreamTestHelper.newInstance(dataStreamName, List.of(backingIndex1.getIndex(), backingIndex2.getIndex())));
    ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadataBuilder).build();
    {
        // Data streams excluded: the backing indices never show up for any wildcard.
        IndicesOptions options = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false, true, false, false, false);
        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(clusterState, options, SystemIndexAccessLevel.NONE);
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of("foo_*"));
        assertThat(resolved, containsInAnyOrder("foo_index", "foo_foo", "bar_index"));
        resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of("bar_*"));
        assertThat(resolved, containsInAnyOrder("bar_bar", "bar_index"));
    }
    {
        // Data streams included: "foo_*" and "*" additionally match both backing indices.
        IndicesOptions options = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false, true, false, false, false);
        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(clusterState, options, false, false, true, SystemIndexAccessLevel.NONE, NONE, NONE);
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of("foo_*"));
        assertThat(resolved, containsInAnyOrder("foo_index", "bar_index", "foo_foo", backingIndex1Name, backingIndex2Name));
        resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of("*"));
        assertThat(resolved, containsInAnyOrder("foo_index", "bar_index", "foo_foo", "bar_bar", backingIndex1Name, backingIndex2Name));
    }
    {
        // Same expectations with expand-hidden enabled on top of data-stream inclusion.
        IndicesOptions options = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false, true, true, false, false, false);
        IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(clusterState, options, false, false, true, SystemIndexAccessLevel.NONE, NONE, NONE);
        Collection<String> resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of("foo_*"));
        assertThat(resolved, containsInAnyOrder("foo_index", "bar_index", "foo_foo", backingIndex1Name, backingIndex2Name));
        resolved = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of("*"));
        assertThat(resolved, containsInAnyOrder("foo_index", "bar_index", "foo_foo", "bar_bar", backingIndex1Name, backingIndex2Name));
    }
}
293566.241124elasticsearch
// Verifies expected-shard-size computation for shards recovering via LocalShardsRecoverySource
// (shrink/split targets): a shrink target's size is the sum of all source shards, a split
// target's size is the sum of the source shards it derives from, and a missing source index
// falls back to the caller-supplied default size.
public void testSizeShrinkIndex() {
    // Known primary shard sizes for the 4-shard source index "test" (total = 1110 bytes).
    Map<String, Long> shardSizes = new HashMap<>();
    shardSizes.put("[test][0][p]", 10L);
    shardSizes.put("[test][1][p]", 100L);
    shardSizes.put("[test][2][p]", 500L);
    shardSizes.put("[test][3][p]", 500L);
    ClusterInfo info = new DevNullClusterInfo(Map.of(), Map.of(), shardSizes);
    Metadata.Builder metaBuilder = Metadata.builder();
    metaBuilder.put(IndexMetadata.builder("test").settings(settings(IndexVersion.current()).put("index.uuid", "1234")).numberOfShards(4).numberOfReplicas(0));
    // "target": shrink of "test" down to 1 shard (resize-source name/uuid point back at "test").
    metaBuilder.put(IndexMetadata.builder("target").settings(settings(IndexVersion.current()).put("index.uuid", "5678").put(IndexMetadata.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetadata.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")).numberOfShards(1).numberOfReplicas(0));
    // "target2": resize of "test" into 2 shards, so each target shard maps to 2 source shards.
    metaBuilder.put(IndexMetadata.builder("target2").settings(settings(IndexVersion.current()).put("index.uuid", "9101112").put(IndexMetadata.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetadata.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")).numberOfShards(2).numberOfReplicas(0));
    Metadata metadata = metaBuilder.build();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY);
    routingTableBuilder.addAsNew(metadata.index("test"));
    routingTableBuilder.addAsNew(metadata.index("target"));
    routingTableBuilder.addAsNew(metadata.index("target2"));
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTableBuilder.build()).build();
    AllocationService allocationService = createAllocationService();
    // Single node: assign and start the source-index shards so their sizes are attributable.
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
    clusterState = allocationService.reroute(clusterState, "foo", ActionListener.noop());
    clusterState = startShardsAndReroute(allocationService, clusterState, clusterState.getRoutingTable().index("test").shardsWithState(ShardRoutingState.UNASSIGNED));
    RoutingAllocation allocation = new RoutingAllocation(null, clusterState, info, null, 0);
    final Index index = new Index("test", "1234");
    // Hand-built routings for the four source shards; each expected size matches shardSizes above.
    ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_0 = ShardRoutingHelper.initialize(test_0, "node1");
    test_0 = ShardRoutingHelper.moveToStarted(test_0);
    ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_1 = ShardRoutingHelper.initialize(test_1, "node2");
    test_1 = ShardRoutingHelper.moveToStarted(test_1);
    ShardRouting test_2 = ShardRouting.newUnassigned(new ShardId(index, 2), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_2 = ShardRoutingHelper.initialize(test_2, "node1");
    ShardRouting test_3 = ShardRouting.newUnassigned(new ShardId(index, 3), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    test_3 = ShardRoutingHelper.initialize(test_3, "node1");
    assertEquals(500L, getExpectedShardSize(test_3, 0L, allocation));
    assertEquals(500L, getExpectedShardSize(test_2, 0L, allocation));
    assertEquals(100L, getExpectedShardSize(test_1, 0L, allocation));
    assertEquals(10L, getExpectedShardSize(test_0, 0L, allocation));
    // Shrink target (1 shard): expected size is the sum of all four source shards = 1110.
    ShardRouting target = ShardRouting.newUnassigned(new ShardId(new Index("target", "5678"), 0), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    assertEquals(1110L, getExpectedShardSize(target, 0L, allocation));
    // Resize target with 2 shards: shard 0 derives from source shards {0,1} = 110 …
    ShardRouting target2 = ShardRouting.newUnassigned(new ShardId(new Index("target2", "9101112"), 0), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    assertEquals(110L, getExpectedShardSize(target2, 0L, allocation));
    // … and shard 1 derives from source shards {2,3} = 1000.
    target2 = ShardRouting.newUnassigned(new ShardId(new Index("target2", "9101112"), 1), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"), ShardRouting.Role.DEFAULT);
    assertEquals(1000L, getExpectedShardSize(target2, 0L, allocation));
    // Remove the source index entirely: size can no longer be derived from source shards.
    ClusterState clusterStateWithMissingSourceIndex = ClusterState.builder(clusterState).metadata(Metadata.builder(metadata).remove("test")).routingTable(RoutingTable.builder(clusterState.routingTable()).remove("test").build()).build();
    // NOTE(review): the result of this reroute is discarded and it operates on the old
    // clusterState, not clusterStateWithMissingSourceIndex — presumably leftover; confirm intent.
    allocationService.reroute(clusterState, "foo", ActionListener.noop());
    RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, clusterStateWithMissingSourceIndex, info, null, 0);
    // With the source gone, the provided default (42) is returned unchanged.
    assertEquals(42L, getExpectedShardSize(target, 42L, allocationWithMissingSourceIndex));
    assertEquals(42L, getExpectedShardSize(target2, 42L, allocationWithMissingSourceIndex));
}
29307.7619203elasticsearch
// Drives a DiskThresholdMonitor through disk-usage transitions (ok → low → high → flood → ok,
// plus frozen-tier flood) and asserts the exact log messages emitted, including the
// single-vs-repeated logging behaviour controlled by the reroute-interval clock.
// testHeadroom=true exercises the max-headroom byte thresholds on a 1000 TB disk;
// testHeadroom=false exercises the percentage thresholds on a 100-byte disk.
private void doTestDiskMonitorLogging(boolean testHeadroom) throws IllegalAccessException {
    // One normal node and one frozen-only node, so both watermark families are covered.
    final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).nodes(DiscoveryNodes.builder().add(newNormalNode("node1")).add(newFrozenOnlyNode("frozen"))).build();
    final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(clusterState);
    // When true, each clock read jumps past the reroute interval, so repeated warnings are allowed.
    final AtomicBoolean advanceTime = new AtomicBoolean(true);
    final LongSupplier timeSupplier = new LongSupplier() {

        long time;

        @Override
        public long getAsLong() {
            if (advanceTime.get()) {
                // Jump just past the configured reroute interval to unthrottle logging.
                time += DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(Settings.EMPTY).getMillis() + 1;
            }
            logger.info("time: [{}]", time);
            return time;
        }
    };
    // Pluggable "size of shards relocating away" so the test can flip the
    // "expected to be below the watermark after relocations" prediction.
    final AtomicLong relocatingShardSizeRef = new AtomicLong();
    DiskThresholdMonitor monitor = new DiskThresholdMonitor(Settings.EMPTY, clusterStateRef::get, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, timeSupplier, (reason, priority, listener) -> listener.onResponse(null)) {

        @Override
        protected void updateIndicesReadOnly(Set<String> indicesToMarkReadOnly, Releasable onCompletion, boolean readOnly) {
            // No-op: the test only observes logging, not the read-only block itself.
            onCompletion.close();
        }

        @Override
        long sizeOfRelocatingShards(RoutingNode routingNode, DiskUsage diskUsage, ClusterInfo info, ClusterState reroutedClusterState) {
            return relocatingShardSizeRef.get();
        }
    };
    long thousandTb = ByteSizeValue.ofTb(1000).getBytes();
    long total = testHeadroom ? thousandTb : 100;
    // Disk-usage fixtures for each watermark band, in both headroom and percentage terms.
    Map<String, DiskUsage> allDisksOk = new HashMap<>();
    allDisksOk.put("node1", new DiskUsage("node1", "node1", "/foo/bar", total, testHeadroom ? betweenGb(200, 1000) : between(15, 100)));
    allDisksOk.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", total, testHeadroom ? (randomBoolean() ? betweenGb(20, 1000) : betweenGb(20, 50)) : between(15, 100)));
    Map<String, DiskUsage> aboveLowWatermark = new HashMap<>();
    aboveLowWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", total, testHeadroom ? betweenGb(150, 199) : between(10, 14)));
    aboveLowWatermark.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", total, testHeadroom ? betweenGb(150, 199) : between(10, 14)));
    Map<String, DiskUsage> aboveHighWatermark = new HashMap<>();
    aboveHighWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", total, testHeadroom ? betweenGb(100, 149) : between(5, 9)));
    aboveHighWatermark.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", total, testHeadroom ? betweenGb(20, 99) : between(5, 9)));
    Map<String, DiskUsage> aboveFloodStageWatermark = new HashMap<>();
    aboveFloodStageWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", total, testHeadroom ? betweenGb(0, 99) : between(0, 4)));
    aboveFloodStageWatermark.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", total, testHeadroom ? betweenGb(20, 99) : between(5, 9)));
    Map<String, DiskUsage> frozenAboveFloodStageWatermark = new HashMap<>();
    // node1 is healthy here; only the frozen node exceeds its (frozen) flood-stage watermark.
    frozenAboveFloodStageWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", total, testHeadroom ? betweenGb(200, 1000) : between(15, 100)));
    frozenAboveFloodStageWatermark.put("frozen", new DiskUsage("frozen", "frozen", "/foo/bar", total, testHeadroom ? betweenGb(0, 19) : between(0, 4)));
    // Healthy disks never log, regardless of whether time advances.
    advanceTime.set(true);
    assertNoLogging(monitor, allDisksOk);
    advanceTime.set(randomBoolean());
    assertNoLogging(monitor, allDisksOk);
    // Expected watermark descriptions embedded in the log messages.
    String lowWatermarkString = testHeadroom ? "max_headroom=200gb" : "85%";
    String highWatermarkString = testHeadroom ? "max_headroom=150gb" : "90%";
    String floodWatermarkString = testHeadroom ? "max_headroom=100gb" : "95%";
    String frozenFloodWatermarkString = testHeadroom ? "max_headroom=20gb" : "95%";
    // Low watermark: logged once at INFO.
    assertSingleInfoMessage(monitor, aboveLowWatermark, "low disk watermark [" + lowWatermarkString + "] exceeded on *node1* replicas will not be assigned to this node");
    // High watermark: logged once while time stands still, repeatedly once time advances.
    advanceTime.set(false);
    assertSingleWarningMessage(monitor, aboveHighWatermark, "high disk watermark [" + highWatermarkString + "] exceeded on *node1* shards will be relocated away from this node* " + "the node is expected to continue to exceed the high disk watermark when these relocations are complete");
    advanceTime.set(true);
    assertRepeatedWarningMessages(monitor, aboveHighWatermark, "high disk watermark [" + highWatermarkString + "] exceeded on *node1* shards will be relocated away from this node* " + "the node is expected to continue to exceed the high disk watermark when these relocations are complete");
    advanceTime.set(randomBoolean());
    assertRepeatedWarningMessages(monitor, aboveFloodStageWatermark, "flood stage disk watermark [" + floodWatermarkString + "] exceeded on *node1* all indices on this node will be marked read-only");
    // Negative relocating size: the prediction flips to "below the watermark after relocations".
    relocatingShardSizeRef.set(testHeadroom ? (-1L) * ByteSizeValue.ofGb(100).getBytes() : -5L);
    advanceTime.set(true);
    assertSingleInfoMessage(monitor, aboveHighWatermark, "high disk watermark [" + highWatermarkString + "] exceeded on *node1* shards will be relocated away from this node* " + "the node is expected to be below the high disk watermark when these relocations are complete");
    relocatingShardSizeRef.set(0L);
    // Burn one clock tick so the next check happens within the same interval.
    timeSupplier.getAsLong();
    advanceTime.set(false);
    assertSingleWarningMessage(monitor, aboveHighWatermark, "high disk watermark [" + highWatermarkString + "] exceeded on *node1* shards will be relocated away from this node* " + "the node is expected to continue to exceed the high disk watermark when these relocations are complete");
    advanceTime.set(true);
    assertRepeatedWarningMessages(monitor, aboveHighWatermark, "high disk watermark [" + highWatermarkString + "] exceeded on *node1* shards will be relocated away from this node* " + "the node is expected to continue to exceed the high disk watermark when these relocations are complete");
    advanceTime.set(randomBoolean());
    // Recovery transitions log once at INFO: high→low, then low→ok.
    assertSingleInfoMessage(monitor, aboveLowWatermark, "high disk watermark [" + highWatermarkString + "] no longer exceeded on *node1* but low disk watermark [" + lowWatermarkString + "] is still exceeded");
    advanceTime.set(true);
    assertSingleInfoMessage(monitor, allDisksOk, "low disk watermark [" + lowWatermarkString + "] no longer exceeded on *node1*");
    advanceTime.set(randomBoolean());
    // Various relapse/recovery orderings keep producing the matching messages.
    assertRepeatedWarningMessages(monitor, aboveFloodStageWatermark, "flood stage disk watermark [" + floodWatermarkString + "] exceeded on *node1* all indices on this node will be marked read-only");
    assertSingleInfoMessage(monitor, allDisksOk, "low disk watermark [" + lowWatermarkString + "] no longer exceeded on *node1*");
    advanceTime.set(true);
    assertRepeatedWarningMessages(monitor, aboveHighWatermark, "high disk watermark [" + highWatermarkString + "] exceeded on *node1* shards will be relocated away from this node* " + "the node is expected to continue to exceed the high disk watermark when these relocations are complete");
    assertSingleInfoMessage(monitor, allDisksOk, "low disk watermark [" + lowWatermarkString + "] no longer exceeded on *node1*");
    assertRepeatedWarningMessages(monitor, aboveFloodStageWatermark, "flood stage disk watermark [" + floodWatermarkString + "] exceeded on *node1* all indices on this node will be marked read-only");
    assertSingleInfoMessage(monitor, aboveLowWatermark, "high disk watermark [" + highWatermarkString + "] no longer exceeded on *node1* but low disk watermark [" + lowWatermarkString + "] is still exceeded");
    assertSingleInfoMessage(monitor, allDisksOk, "low disk watermark [" + lowWatermarkString + "] no longer exceeded on *node1*");
    // Frozen-tier flood stage has its own watermark string and mentions the frozen node.
    assertRepeatedWarningMessages(monitor, frozenAboveFloodStageWatermark, "flood stage disk watermark [" + frozenFloodWatermarkString + "] exceeded on *frozen*");
    assertNoLogging(monitor, allDisksOk);
}
293187.761132elasticsearch
// Covers getMinimumTotalSizeForBelowLowWatermark across the low-watermark configuration space:
// default percentage, absolute byte values, ratio form, max-headroom caps, and zero headroom.
public void testMinimumTotalSizeForBelowLowWatermark() {
    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    DiskThresholdSettings thresholds = new DiskThresholdSettings(Settings.EMPTY, clusterSettings);
    // Defaults: 85% low watermark → 850 used bytes need a 1000-byte disk; on very large disks
    // the default 200gb max headroom takes over.
    assertEquals(ByteSizeValue.ofBytes(1000), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(850)));
    assertEquals(ByteSizeValue.add(ByteSizeValue.ofTb(100), ByteSizeValue.ofGb(200)), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofTb(100)));
    final long multiplier = between(1, 1000);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(85 * multiplier)), Matchers.equalTo(ByteSizeValue.ofBytes(100L * multiplier)));
    // Absolute low watermark of 1gb: required total is simply used + 1gb.
    Settings updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1gb").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "100mb").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "10mb").build();
    clusterSettings.applySettings(updated);
    assertEquals(ByteSizeValue.add(ByteSizeValue.ofGb(1), ByteSizeValue.ofBytes(850)), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(850)));
    assertEquals(ByteSizeValue.add(ByteSizeValue.ofTb(100), ByteSizeValue.ofGb(1)), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofTb(100)));
    // Ratio 0.50 with headroom disabled: required total is used / 0.5, with no cap.
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "0.50").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), "-1").build();
    clusterSettings.applySettings(updated);
    assertEquals(ByteSizeValue.ofBytes(1700), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(850)));
    assertEquals(ByteSizeValue.ofBytes((long) (ByteSizeValue.ofTb(100).getBytes() / 0.5)), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofTb(100)));
    // Same ratio but a 500gb headroom cap: the cap wins on the 100tb disk.
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "0.50").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), "500gb").build();
    clusterSettings.applySettings(updated);
    assertEquals(ByteSizeValue.ofBytes(1700), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(850)));
    assertEquals(ByteSizeValue.add(ByteSizeValue.ofTb(100), ByteSizeValue.ofGb(500)), thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofTb(100)));
    // Arbitrary percentages, including values that force rounding up.
    final long pct = between(1, 89);
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), pct + "%").build();
    clusterSettings.applySettings(updated);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(pct * multiplier)), Matchers.equalTo(ByteSizeValue.ofBytes(100L * multiplier)));
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "57%").build();
    clusterSettings.applySettings(updated);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(32547L)), Matchers.equalTo(ByteSizeValue.ofBytes(57100L)));
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "68%").build();
    clusterSettings.applySettings(updated);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(4080L)), Matchers.equalTo(ByteSizeValue.ofBytes(6000)));
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "29%").build();
    clusterSettings.applySettings(updated);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(17777L)), Matchers.equalTo(ByteSizeValue.ofBytes(61300)));
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "90%").build();
    clusterSettings.applySettings(updated);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(90L)), Matchers.equalTo(ByteSizeValue.ofBytes(100L)));
    // Tiny (1b) and zero headroom caps override the percentage result entirely.
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "90%").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), "1b").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey(), "1b").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey(), "1b").build();
    clusterSettings.applySettings(updated);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(90L)), Matchers.equalTo(ByteSizeValue.ofBytes(91L)));
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "90%").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING.getKey(), "0b").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_MAX_HEADROOM_SETTING.getKey(), "0b").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_MAX_HEADROOM_SETTING.getKey(), "0b").build();
    clusterSettings.applySettings(updated);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(90L)), Matchers.equalTo(ByteSizeValue.ofBytes(90L)));
    // Absolute watermarks for all three tiers: required total is used + the absolute value.
    final long absoluteBytes = between(1, 1000);
    updated = Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), absoluteBytes + "b").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), absoluteBytes + "b").put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), absoluteBytes + "b").build();
    clusterSettings.applySettings(updated);
    long required = between(0, 1000);
    assertThat(thresholds.getMinimumTotalSizeForBelowLowWatermark(ByteSizeValue.ofBytes(required)), Matchers.equalTo(ByteSizeValue.ofBytes(required + absoluteBytes)));
}
295656.9796elasticsearch
/**
 * Verifies even balancing when all three nodes are present from the start:
 * two indices ("test" and "test1"), each with 3 primaries and 1 replica,
 * should end up with 4 STARTED shards per node — 2 per index per node.
 * The test walks the full lifecycle: unassigned -> primaries INITIALIZING
 * -> primaries STARTED / replicas INITIALIZING -> everything STARTED.
 */
public void testBalanceAllNodesStarted() {
    // Unthrottled recoveries and always-allowed rebalancing so the balancer
    // itself (not throttling) determines the final distribution.
    AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put("cluster.routing.allocation.node_initial_primaries_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(3).numberOfReplicas(1)).put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(3).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).addAsNew(metadata.index("test1")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    // Before any nodes exist, every shard copy of both indices is UNASSIGNED
    // and has no current node.
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(1).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(0).currentNodeId(), nullValue());
        assertThat(clusterState.routingTable().index("test").shard(i).shard(1).currentNodeId(), nullValue());
    }
    assertThat(clusterState.routingTable().index("test1").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(1).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(0).currentNodeId(), nullValue());
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(1).currentNodeId(), nullValue());
    }
    logger.info("Adding three node and performing rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
    ClusterState newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    // reroute must produce a new state (primaries get assigned).
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    // After the first reroute: primaries INITIALIZING, replicas still
    // UNASSIGNED (replicas can only allocate once their primary is started).
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
    }
    logger.info("Another round of rebalancing");
    // Rebuilding the node set without changes plus a reroute must be a no-op.
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    // Starting the initializing primaries triggers replica allocation.
    newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
    }
    logger.info("Reroute, nothing should change");
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    logger.info("Start the more shards");
    // Start the initializing replicas; now everything should be STARTED.
    newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    assertThat(clusterState.routingTable().index("test1").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    // Final balance: 12 shard copies (2 indices * 3 shards * 2 copies) spread
    // as 4 per node, exactly 2 per index on each node.
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).count(), equalTo(2L));
    assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).count(), equalTo(2L));
}
296039.57799elasticsearch
/**
 * Same end-state as {@code testBalanceAllNodesStarted}, but nodes join one at
 * a time: node1 gets all primaries, node2 receives the replicas once primaries
 * start, and adding node3 triggers rebalancing until each of the three nodes
 * holds 4 STARTED shards (2 per index).
 */
public void testBalanceIncrementallyStartNodes() {
    // Unthrottled recoveries and always-allowed rebalancing so only the
    // balancer's placement decisions are under test.
    AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put("cluster.routing.allocation.node_initial_primaries_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(3).numberOfReplicas(1)).put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(3).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).addAsNew(metadata.index("test1")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    // With no nodes, every shard copy of both indices is UNASSIGNED.
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(1).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).shard(0).currentNodeId(), nullValue());
        assertThat(clusterState.routingTable().index("test").shard(i).shard(1).currentNodeId(), nullValue());
    }
    assertThat(clusterState.routingTable().index("test1").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(1).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(0).currentNodeId(), nullValue());
        assertThat(clusterState.routingTable().index("test1").shard(i).shard(1).currentNodeId(), nullValue());
    }
    logger.info("Adding one node and performing rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
    ClusterState newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    // Single node: all primaries INITIALIZING on node1, replicas cannot be
    // allocated (no second node, and primaries not yet started).
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
    }
    logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    // Replicas still cannot allocate until their primaries are STARTED.
    assertThat(newState, equalTo(clusterState));
    logger.info("Start the primary shard");
    newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    // Primaries STARTED; replicas now INITIALIZING (on node2).
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
    }
    logger.info("Reroute, nothing should change");
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, equalTo(clusterState));
    logger.info("Start the backup shard");
    newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    // Both indices fully STARTED across the two nodes.
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    assertThat(clusterState.routingTable().index("test1").size(), equalTo(3));
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    logger.info("Add another node and perform reroute to relocate shards to the new node");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
    // Iterate reroute + start-shards until the cluster reaches a fixed point.
    clusterState = applyStartedShardsUntilNoChange(clusterState, strategy);
    // Final balance after rebalancing onto node3: 4 STARTED shards per node,
    // 2 per index on each node.
    assertThat(clusterState.routingTable().index("test").size(), equalTo(3));
    assertThat(clusterState.routingTable().index("test1").size(), equalTo(3));
    assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(clusterState.getRoutingNodes().node("node3").numberOfShardsWithState(STARTED), equalTo(4));
    assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState("test", STARTED).count(), equalTo(2L));
    assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState("test1", STARTED).count(), equalTo(2L));
    assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState("test1", STARTED).count(), equalTo(2L));
    assertThat(clusterState.getRoutingNodes().node("node3").shardsWithState("test1", STARTED).count(), equalTo(2L));
}
294557.541298elasticsearch
/**
 * Verifies that 50 single-shard indices distribute evenly: with 25 nodes each
 * node gets exactly 2 shards (no two of the same index), and adding 25 more
 * nodes only causes relocations once the shards are started — at which point
 * exactly half the shards relocate to the new nodes.
 *
 * <p>Fix: the assertions previously hard-coded the literal {@code 25}; they
 * now derive it as {@code numberOfIndices / 2} so the test stays consistent if
 * {@code numberOfIndices} is ever changed.
 */
public void testMultiIndexEvenDistribution() {
    // Unthrottled recoveries and always-allowed rebalancing.
    AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
    final int numberOfIndices = 50;
    logger.info("Building initial routing table with " + numberOfIndices + " indices");
    Metadata.Builder metadataBuilder = Metadata.builder();
    for (int i = 0; i < numberOfIndices; i++) {
        // One primary, no replicas, per index.
        metadataBuilder.put(IndexMetadata.builder("test" + i).settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(0));
    }
    Metadata metadata = metadataBuilder.build();
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY);
    for (int i = 0; i < numberOfIndices; i++) {
        routingTableBuilder.addAsNew(metadata.index("test" + i));
    }
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTableBuilder.build()).build();
    assertThat(clusterState.routingTable().indicesRouting().size(), equalTo(numberOfIndices));
    // With no nodes, every shard is UNASSIGNED.
    for (int i = 0; i < numberOfIndices; i++) {
        assertThat(clusterState.routingTable().index("test" + i).size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).state(), equalTo(UNASSIGNED));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).currentNodeId(), nullValue());
    }
    logger.info("Adding " + (numberOfIndices / 2) + " nodes");
    DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
    for (int i = 0; i < (numberOfIndices / 2); i++) {
        nodesBuilder.add(newNode("node" + i));
    }
    clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
    ClusterState newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    // All 50 primaries assigned and INITIALIZING on the first 25 nodes.
    for (int i = 0; i < numberOfIndices; i++) {
        assertThat(clusterState.routingTable().index("test" + i).size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).unassigned(), equalTo(false));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).primary(), equalTo(true));
        String nodeId = clusterState.routingTable().index("test" + i).shard(0).shard(0).currentNodeId();
        // Node ids are "node<k>"; the shard must live on one of the first
        // numberOfIndices / 2 nodes.
        int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
        assertThat(nodeIndex, lessThan(numberOfIndices / 2));
    }
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    Set<String> encounteredIndices = new HashSet<>();
    for (RoutingNode routingNode : routingNodes) {
        assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(0));
        // Each node carries exactly 2 shards, and never 2 of the same index.
        assertThat(routingNode.size(), equalTo(2));
        int nodeIndex = Integer.parseInt(routingNode.nodeId().substring("node".length()));
        assertThat(nodeIndex, lessThan(numberOfIndices / 2));
        for (ShardRouting shardRoutingEntry : routingNode) {
            assertThat(encounteredIndices, not(hasItem(shardRoutingEntry.getIndexName())));
            encounteredIndices.add(shardRoutingEntry.getIndexName());
        }
    }
    logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change");
    nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
    for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) {
        nodesBuilder.add(newNode("node" + i));
    }
    clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    // INITIALIZING shards are not rebalanced, so the new nodes stay empty.
    assertThat(newState, equalTo(clusterState));
    logger.info("Marking the shard as started");
    newState = startShardsAndReroute(strategy, clusterState, shardsWithState(routingNodes, INITIALIZING));
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    // Once started, half of the shards relocate toward the empty nodes.
    int numberOfRelocatingShards = 0;
    int numberOfStartedShards = 0;
    for (int i = 0; i < numberOfIndices; i++) {
        assertThat(clusterState.routingTable().index("test" + i).size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).unassigned(), equalTo(false));
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING)));
        if (clusterState.routingTable().index("test" + i).shard(0).shard(0).state() == STARTED) {
            numberOfStartedShards++;
        } else if (clusterState.routingTable().index("test" + i).shard(0).shard(0).state() == RELOCATING) {
            numberOfRelocatingShards++;
        }
        assertThat(clusterState.routingTable().index("test" + i).shard(0).shard(0).primary(), equalTo(true));
        // currentNodeId still points at the source node while RELOCATING.
        String nodeId = clusterState.routingTable().index("test" + i).shard(0).shard(0).currentNodeId();
        int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
        assertThat(nodeIndex, lessThan(numberOfIndices / 2));
    }
    // Exactly half relocate (one per original node) and half stay put.
    assertThat(numberOfRelocatingShards, equalTo(numberOfIndices / 2));
    assertThat(numberOfStartedShards, equalTo(numberOfIndices / 2));
}
293266.1411107elasticsearch
/**
 * Tests that {@code PrunePostingsMergePolicy} removes stale postings for the
 * "id" field on merge: after soft-updating documents many times, each unique
 * id keeps exactly one live posting while other fields (e.g. "text") retain
 * postings for every document, live or soft-deleted.
 *
 * <p>Fix: removed a dead {@code Document} that was constructed but never
 * handed to the writer (the hard-delete path below uses a {@code Term} only).
 *
 * @throws IOException on index I/O failure
 */
public void testPrune() throws IOException {
    try (Directory dir = newDirectory()) {
        IndexWriterConfig iwc = newIndexWriterConfig();
        iwc.setSoftDeletesField("_soft_deletes");
        // Retain all soft-deleted docs while pruning "id" postings on merge;
        // shuffle segment order to exercise merging in arbitrary order.
        MergePolicy mp = new SoftDeletesRetentionMergePolicy("_soft_deletes", MatchAllDocsQuery::new, new PrunePostingsMergePolicy(newLogMergePolicy(), "id"));
        iwc.setMergePolicy(new ShuffleForcedMergePolicy(mp));
        boolean sorted = randomBoolean();
        if (sorted) {
            iwc.setIndexSort(new Sort(new SortField("sort", SortField.Type.INT)));
        }
        int numUniqueDocs = randomIntBetween(1, 100);
        int numDocs = randomIntBetween(numUniqueDocs, numUniqueDocs * 5);
        try (IndexWriter writer = new IndexWriter(dir, iwc)) {
            for (int i = 0; i < numDocs; i++) {
                // Occasionally flush/merge mid-stream to vary segment layout.
                if (rarely()) {
                    writer.flush();
                }
                if (rarely()) {
                    writer.forceMerge(1, false);
                }
                // Ids cycle 0..numUniqueDocs-1, so most ids are soft-updated
                // multiple times.
                int id = i % numUniqueDocs;
                Document doc = new Document();
                doc.add(new StringField("id", "" + id, Field.Store.NO));
                doc.add(newTextField("text", "the quick brown fox", Field.Store.YES));
                doc.add(new NumericDocValuesField("sort", i));
                writer.softUpdateDocument(new Term("id", "" + id), doc, new NumericDocValuesField("_soft_deletes", 1));
                if (i == 0) {
                    writer.flush();
                }
            }
            writer.forceMerge(1);
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                LeafReader leafReader = reader.leaves().get(0).reader();
                // Soft deletes retained: maxDoc counts every version.
                assertEquals(numDocs, leafReader.maxDoc());
                Terms id = leafReader.terms("id");
                TermsEnum iterator = id.iterator();
                // Pruned field: each unique id has exactly one live posting.
                for (int i = 0; i < numUniqueDocs; i++) {
                    assertTrue(iterator.seekExact(new BytesRef("" + i)));
                    assertEquals(1, iterator.docFreq());
                }
                // Non-pruned field keeps postings for all versions.
                iterator = leafReader.terms("text").iterator();
                assertTrue(iterator.seekExact(new BytesRef("quick")));
                assertEquals(leafReader.maxDoc(), iterator.docFreq());
                int numValues = 0;
                NumericDocValues sort = leafReader.getNumericDocValues("sort");
                while (sort.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                    if (sorted) {
                        // With an index sort on "sort", doc order == value.
                        assertEquals(sort.docID(), sort.longValue());
                    } else {
                        assertTrue(sort.longValue() >= 0);
                        assertTrue(sort.longValue() < numDocs);
                    }
                    numValues++;
                }
                assertEquals(numValues, numDocs);
            }
            {
                // Hard-delete a non-existent id, then soft-delete it via a doc
                // values update; neither must disturb the pruned postings.
                writer.deleteDocuments(new Term("id", "test"));
                writer.flush();
                writer.forceMerge(1);
                writer.updateNumericDocValue(new Term("id", "test"), "_soft_deletes", 1);
                writer.flush();
                writer.forceMerge(1);
                try (DirectoryReader reader = DirectoryReader.open(writer)) {
                    LeafReader leafReader = reader.leaves().get(0).reader();
                    assertEquals(numDocs, leafReader.maxDoc());
                    Terms id = leafReader.terms("id");
                    TermsEnum iterator = id.iterator();
                    assertEquals(numUniqueDocs, id.size());
                    for (int i = 0; i < numUniqueDocs; i++) {
                        assertTrue(iterator.seekExact(new BytesRef("" + i)));
                        assertEquals(1, iterator.docFreq());
                    }
                    assertFalse(iterator.seekExact(new BytesRef("test")));
                    iterator = leafReader.terms("text").iterator();
                    assertTrue(iterator.seekExact(new BytesRef("quick")));
                    assertEquals(leafReader.maxDoc(), iterator.docFreq());
                }
            }
            {
                // Soft-delete every live document; after merge the "id" terms
                // dictionary disappears entirely while "text" survives.
                Document doc = new Document();
                doc.add(new StringField("id", "" + 0, Field.Store.NO));
                doc.add(newTextField("text", "the quick brown fox", Field.Store.YES));
                doc.add(new NumericDocValuesField("sort", 0));
                doc.add(new NumericDocValuesField("_soft_deletes", 1));
                writer.softUpdateDocument(new Term("id", "" + 0), doc, new NumericDocValuesField("_soft_deletes", 1));
                for (int i = 0; i < numUniqueDocs; i++) {
                    writer.updateNumericDocValue(new Term("id", "" + i), "_soft_deletes", 1);
                }
                writer.flush();
                writer.forceMerge(1);
                try (DirectoryReader reader = DirectoryReader.open(writer)) {
                    LeafReader leafReader = reader.leaves().get(0).reader();
                    assertEquals(numDocs + 1, leafReader.maxDoc());
                    assertEquals(0, leafReader.numDocs());
                    assertNull(leafReader.terms("id"));
                    TermsEnum iterator = leafReader.terms("text").iterator();
                    assertTrue(iterator.seekExact(new BytesRef("quick")));
                    assertEquals(leafReader.maxDoc(), iterator.docFreq());
                }
            }
        }
    }
}
294189.351116elasticsearch
/**
 * End-to-end {@code FsRepository} round trip: snapshot a shard, wipe the
 * local index, restore it, then take an incremental snapshot after deleting
 * one doc and verify that restores reuse all unchanged files and only
 * transfer the two changed ones (the live-docs file and the new segments_N).
 */
public void testSnapshotAndRestore() throws IOException {
    try (Directory directory = newDirectory()) {
        Path repo = createTempDir();
        Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()).put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()).putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()).put("location", repo).put("compress", randomBoolean()).put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES).build();
        int numDocs = indexDocs(directory);
        RepositoryMetadata metadata = new RepositoryMetadata("test", "fs", settings);
        FsRepository repository = new FsRepository(metadata, new Environment(settings, null), NamedXContentRegistry.EMPTY, BlobStoreTestUtil.mockClusterService(), MockBigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
        repository.start();
        final Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, "myindexUUID").build();
        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("myindex", indexSettings);
        ShardId shardId = new ShardId(idxSettings.getIndex(), 1);
        Store store = new Store(shardId, idxSettings, directory, new DummyShardLock(shardId));
        SnapshotId snapshotId = new SnapshotId("test", "test");
        IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID());
        IndexCommit indexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory());
        // --- first (full) snapshot ---
        final PlainActionFuture<ShardSnapshotResult> snapshot1Future = new PlainActionFuture<>();
        IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(null);
        repository.snapshotShard(new SnapshotShardContext(store, null, snapshotId, indexId, new SnapshotIndexCommit(new Engine.IndexCommitRef(indexCommit, () -> {
        })), null, snapshotStatus, IndexVersion.current(), randomMillisUpToYear9999(), snapshot1Future));
        final ShardGeneration shardGeneration = snapshot1Future.actionGet().getGeneration();
        IndexShardSnapshotStatus.Copy snapshot1StatusCopy = snapshotStatus.asCopy();
        // First snapshot has no prior generation, so every file is incremental.
        assertEquals(snapshot1StatusCopy.getTotalFileCount(), snapshot1StatusCopy.getIncrementalFileCount());
        // Wipe the local index so the restore has to recover everything.
        Lucene.cleanLuceneIndex(directory);
        expectThrows(org.apache.lucene.index.IndexNotFoundException.class, () -> Lucene.readSegmentInfos(directory));
        DiscoveryNode localNode = DiscoveryNodeUtils.builder("foo").roles(emptySet()).build();
        ShardRouting routing = ShardRouting.newUnassigned(shardId, true, new RecoverySource.SnapshotRecoverySource("test", new Snapshot("foo", snapshotId), IndexVersion.current(), indexId), new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, ""), ShardRouting.Role.DEFAULT);
        routing = ShardRoutingHelper.initialize(routing, localNode.getId(), 0);
        RecoveryState state = new RecoveryState(routing, localNode, null);
        // --- restore into the emptied directory: nothing can be reused ---
        final PlainActionFuture<Void> restore1Future = new PlainActionFuture<>();
        repository.restoreShard(store, snapshotId, indexId, shardId, state, restore1Future);
        restore1Future.actionGet();
        assertTrue(state.getIndex().recoveredBytes() > 0);
        assertEquals(0, state.getIndex().reusedFileCount());
        assertEquals(indexCommit.getFileNames().size(), state.getIndex().recoveredFileCount());
        assertEquals(numDocs, Lucene.readSegmentInfos(directory).totalMaxDoc());
        // --- incremental snapshot after deleting a single random doc ---
        deleteRandomDoc(store.directory());
        SnapshotId incSnapshotId = new SnapshotId("test1", "test1");
        IndexCommit incIndexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory());
        Collection<String> commitFileNames = incIndexCommit.getFileNames();
        final PlainActionFuture<ShardSnapshotResult> snapshot2future = new PlainActionFuture<>();
        // Passing the previous shard generation enables incremental snapshotting.
        IndexShardSnapshotStatus snapshotStatus2 = IndexShardSnapshotStatus.newInitializing(shardGeneration);
        repository.snapshotShard(new SnapshotShardContext(store, null, incSnapshotId, indexId, new SnapshotIndexCommit(new Engine.IndexCommitRef(incIndexCommit, () -> {
        })), null, snapshotStatus2, IndexVersion.current(), randomMillisUpToYear9999(), snapshot2future));
        snapshot2future.actionGet();
        IndexShardSnapshotStatus.Copy snapshot2statusCopy = snapshotStatus2.asCopy();
        // The delete touches exactly two files (live-docs + commit point);
        // everything else is shared with the first snapshot.
        assertEquals(2, snapshot2statusCopy.getIncrementalFileCount());
        assertEquals(commitFileNames.size(), snapshot2statusCopy.getTotalFileCount());
        // --- restore the FIRST snapshot over the current index ---
        RecoveryState firstState = new RecoveryState(routing, localNode, null);
        final PlainActionFuture<Void> restore2Future = new PlainActionFuture<>();
        repository.restoreShard(store, snapshotId, indexId, shardId, firstState, restore2Future);
        restore2Future.actionGet();
        assertEquals("should reuse everything except of .liv and .si", commitFileNames.size() - 2, firstState.getIndex().reusedFileCount());
        // --- restore the incremental snapshot: only the 2 changed files move ---
        RecoveryState secondState = new RecoveryState(routing, localNode, null);
        final PlainActionFuture<Void> restore3Future = new PlainActionFuture<>();
        repository.restoreShard(store, incSnapshotId, indexId, shardId, secondState, restore3Future);
        restore3Future.actionGet();
        assertEquals(secondState.getIndex().reusedFileCount(), commitFileNames.size() - 2);
        assertEquals(secondState.getIndex().recoveredFileCount(), 2);
        List<RecoveryState.FileDetail> recoveredFiles = secondState.getIndex().fileDetails().stream().filter(f -> f.reused() == false).sorted(Comparator.comparing(RecoveryState.FileDetail::name)).toList();
        // The two non-reused files are the live-docs file and the commit point.
        assertTrue(recoveredFiles.get(0).name(), recoveredFiles.get(0).name().endsWith(".liv"));
        assertTrue(recoveredFiles.get(1).name(), recoveredFiles.get(1).name().endsWith("segments_" + incIndexCommit.getGeneration()));
    }
}
291521.8731107elasticsearch
/**
 * Builds a {@code terms} aggregation with a random name and field, then
 * applies a random subset of its optional settings so serialization round
 * trips are exercised across many configurations.
 */
protected TermsAggregationBuilder createTestAggregatorBuilder() {
    final String aggName = randomAlphaOfLengthBetween(3, 20);
    final TermsAggregationBuilder builder = new TermsAggregationBuilder(aggName);
    final String fieldName = randomAlphaOfLengthBetween(3, 20);
    randomFieldOrScript(builder, fieldName);
    if (randomBoolean()) {
        builder.missing("MISSING");
    }
    if (randomBoolean()) {
        builder.size(randomIntBetween(1, Integer.MAX_VALUE));
    }
    if (randomBoolean()) {
        builder.shardSize(randomIntBetween(1, Integer.MAX_VALUE));
    }
    if (randomBoolean()) {
        // Keep an explicit 0 one time in five; otherwise draw any non-negative value.
        int minDocCount = randomInt(4);
        if (minDocCount != 0) {
            minDocCount = randomIntBetween(0, Integer.MAX_VALUE);
        }
        builder.minDocCount(minDocCount);
    }
    if (randomBoolean()) {
        // Same scheme as minDocCount above.
        int shardMinDocCount = randomInt(4);
        if (shardMinDocCount != 0) {
            shardMinDocCount = randomIntBetween(0, Integer.MAX_VALUE);
        }
        builder.shardMinDocCount(shardMinDocCount);
    }
    if (randomBoolean()) {
        builder.collectMode(randomFrom(SubAggCollectionMode.values()));
    }
    if (randomBoolean()) {
        builder.executionHint(randomFrom(executionHints));
    }
    if (randomBoolean()) {
        builder.format("###.##");
    }
    if (randomBoolean()) {
        // Either explicit include/exclude filters (a regexp or a value set per
        // side), or — when neither side was chosen — a partition-based filter.
        String includeRegexp = null;
        String excludeRegexp = null;
        SortedSet<BytesRef> includeValues = null;
        SortedSet<BytesRef> excludeValues = null;
        boolean explicitFilter = false;
        if (randomBoolean()) {
            explicitFilter = true;
            if (randomBoolean()) {
                includeRegexp = randomAlphaOfLengthBetween(5, 10);
            } else {
                includeValues = new TreeSet<>();
                final int includeCount = randomIntBetween(1, 20);
                for (int i = 0; i < includeCount; i++) {
                    includeValues.add(new BytesRef(randomAlphaOfLengthBetween(1, 30)));
                }
            }
        }
        if (randomBoolean()) {
            explicitFilter = true;
            if (randomBoolean()) {
                excludeRegexp = randomAlphaOfLengthBetween(5, 10);
            } else {
                excludeValues = new TreeSet<>();
                final int excludeCount = randomIntBetween(1, 20);
                for (int i = 0; i < excludeCount; i++) {
                    excludeValues.add(new BytesRef(randomAlphaOfLengthBetween(1, 30)));
                }
            }
        }
        final IncludeExclude incExc;
        if (explicitFilter) {
            incExc = new IncludeExclude(includeRegexp, excludeRegexp, includeValues, excludeValues);
        } else {
            final int numPartitions = randomIntBetween(1, 100);
            final int partition = randomIntBetween(0, numPartitions - 1);
            incExc = new IncludeExclude(partition, numPartitions);
        }
        builder.includeExclude(incExc);
    }
    if (randomBoolean()) {
        final List<BucketOrder> order = randomOrder();
        // A single-element order is sometimes set directly to cover both setters.
        if (order.size() == 1 && randomBoolean()) {
            builder.order(order.get(0));
        } else {
            builder.order(order);
        }
    }
    if (randomBoolean()) {
        builder.showTermDocCountError(randomBoolean());
    }
    return builder;
}
292981.4418100elasticsearch
/**
 * Invokes the named REST API from the YAML test suite.
 * <p>
 * Splits {@code params} into path parts and query-string parameters, picks a
 * random path among the best-matching candidates accepted by
 * {@code pathPredicate}, URL-encodes the path parts, chooses a request method
 * (possibly moving the body into the {@code source} query parameter), and
 * performs the request against a client honoring {@code nodeSelector}.
 *
 * @throws IOException              if the request fails at the transport level
 * @throws IllegalArgumentException if a parameter is unsupported, a required
 *                                  parameter is missing, or body usage is invalid
 * @throws IllegalStateException    if no candidate path survives filtering or the
 *                                  provided path parts do not match the chosen path
 */
public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity, Map<String, String> headers, NodeSelector nodeSelector, BiPredicate<ClientYamlSuiteRestApi, ClientYamlSuiteRestApi.Path> pathPredicate) throws IOException {
    ClientYamlSuiteRestApi restApi = restApi(apiName);
    // Parameters the spec marks as required; each one seen in `params` is removed below,
    // so anything left over at the end is genuinely missing.
    Set<String> apiRequiredParameters = restApi.getParams().entrySet().stream().filter(Entry::getValue).map(Entry::getKey).collect(Collectors.toSet());
    List<ClientYamlSuiteRestApi.Path> bestPaths = restApi.getBestMatchingPaths(params.keySet());
    // Let the caller veto candidate paths (e.g. variants a given test cannot use).
    List<ClientYamlSuiteRestApi.Path> filteredPaths = bestPaths.stream().filter(path -> pathPredicate.test(restApi, path)).collect(Collectors.toUnmodifiableList());
    if (filteredPaths.isEmpty()) {
        throw new IllegalStateException(Strings.format("All possible paths [%s] for API [%s] have been skipped", Strings.collectionToCommaDelimitedString(bestPaths), apiName));
    }
    ClientYamlSuiteRestApi.Path path = RandomizedTest.randomFrom(filteredPaths);
    Map<String, String> pathParts = new HashMap<>();
    Map<String, String> queryStringParams = new HashMap<>();
    // Route every provided parameter either into the path or onto the query string.
    for (Map.Entry<String, String> entry : params.entrySet()) {
        if (path.parts().contains(entry.getKey())) {
            pathParts.put(entry.getKey(), entry.getValue());
        } else if (restApi.getParams().containsKey(entry.getKey()) || restSpec.isGlobalParameter(entry.getKey()) || restSpec.isClientParameter(entry.getKey())) {
            queryStringParams.put(entry.getKey(), entry.getValue());
            apiRequiredParameters.remove(entry.getKey());
        } else {
            throw new IllegalArgumentException("path/param [" + entry.getKey() + "] not supported by [" + restApi.getName() + "] " + "api");
        }
    }
    if (false == apiRequiredParameters.isEmpty()) {
        throw new IllegalArgumentException("missing required parameter: " + apiRequiredParameters + " by [" + restApi.getName() + "] api");
    }
    // The chosen path must consume exactly the supplied path parts.
    Set<String> partNames = pathParts.keySet();
    if (path.parts().size() != partNames.size() || path.parts().containsAll(partNames) == false) {
        throw new IllegalStateException("provided path parts don't match the best matching path: " + path.parts() + " - " + partNames);
    }
    String finalPath = path.path();
    for (Entry<String, String> pathPart : pathParts.entrySet()) {
        try {
            // Encode each path part through URI's path encoding, then re-escape any
            // literal slash so the value stays within a single path segment.
            URI uri = new URI(null, null, null, -1, "/" + pathPart.getValue(), null, null);
            String encodedPathPart = uri.getRawPath().substring(1).replace("/", "%2F");
            finalPath = finalPath.replace("{" + pathPart.getKey() + "}", encodedPathPart);
        } catch (URISyntaxException e) {
            throw new RuntimeException("unable to build uri", e);
        }
    }
    List<String> supportedMethods = Arrays.asList(path.methods());
    String requestMethod;
    if (entity != null) {
        if (false == restApi.isBodySupported()) {
            throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api");
        }
        String contentType = entity.getContentType().getValue();
        // Sometimes send the body as the `source` query param with GET, to
        // exercise that alternate code path on the server.
        if (sendBodyAsSourceParam(supportedMethods, contentType, entity)) {
            logger.debug("sending the request body as source param with GET method");
            queryStringParams.put("source", EntityUtils.toString(entity));
            queryStringParams.put("source_content_type", contentType);
            requestMethod = HttpGet.METHOD_NAME;
            entity = null;
        } else {
            requestMethod = RandomizedTest.randomFrom(supportedMethods);
        }
    } else {
        if (restApi.isBodyRequired()) {
            throw new IllegalArgumentException("body is required by [" + restApi.getName() + "] api");
        }
        requestMethod = RandomizedTest.randomFrom(supportedMethods);
    }
    logger.debug("calling api [{}]", apiName);
    Request request = new Request(requestMethod, finalPath);
    for (Map.Entry<String, String> param : queryStringParams.entrySet()) {
        request.addParameter(param.getKey(), param.getValue());
    }
    request.setEntity(entity);
    setOptions(request, headers);
    try {
        Response response = getRestClient(nodeSelector).performRequest(request);
        return new ClientYamlTestResponse(response);
    } catch (ResponseException e) {
        // Surface HTTP-level failures through the YAML-test specific exception type.
        throw new ClientYamlTestResponseException(e);
    }
}
292024.813688elasticsearch
/**
 * Returns a copy of {@code instance} with either its {@code total} or its
 * {@code node} capacity randomly mutated, for equals/hashCode mutation testing.
 * The branches are written to avoid nulling out a metric when doing so would
 * leave the capacity in a state the builder rejects — presumably at least one
 * metric must remain non-null; TODO confirm against AutoscalingCapacity's
 * validation rules.
 */
protected AutoscalingCapacity mutateInstance(AutoscalingCapacity instance) {
    AutoscalingCapacity.Builder builder = AutoscalingCapacity.builder().capacity(instance);
    if (randomBoolean()) {
        // Mutate the `total` capacity: change exactly one of storage, memory or processors.
        boolean hasAllMetrics = instance.total().memory() != null && instance.total().storage() != null && instance.total().processors() != null;
        if (randomBoolean()) {
            builder.total(randomByteSize(hasAllMetrics && (instance.node() == null || instance.node().storage() == null), instance.total().storage()), instance.total().memory(), instance.total().processors());
        } else if (randomBoolean()) {
            builder.total(instance.total().storage(), randomByteSize(hasAllMetrics && (instance.node() == null || instance.node().memory() == null), instance.total().memory()), instance.total().processors());
        } else {
            // Processors: either drop them (only when safe) or add a random delta.
            builder.total(instance.total().storage(), instance.total().memory(), hasAllMetrics && (instance.node() == null || instance.node().processors() == null) && randomBoolean() ? null : Optional.ofNullable(instance.total().processors()).map(p -> p.plus(randomProcessors())).orElse(randomProcessors()));
        }
    } else {
        // Mutate the `node` capacity.
        if (instance.node() == null) {
            // No node capacity yet: introduce one with randomly-nulled resources,
            // constrained by which total metrics are present.
            builder.node(AutoscalingTestCase.randomNullValueAutoscalingResources(instance.total().storage() != null, instance.total().memory() != null, instance.total().processors() != null));
        } else if (randomBoolean() && instance.total().storage() != null) {
            builder.node(randomByteSize(instance.node().memory() != null || instance.node().processors() != null, instance.node().storage()), instance.node().memory(), instance.node().processors());
        } else if (randomBoolean() && instance.total().memory() != null) {
            builder.node(instance.node().storage(), randomByteSize(instance.node().storage() != null || instance.node().processors() != null, instance.node().memory()), instance.node().processors());
        } else if (instance.total().processors() != null) {
            builder.node(instance.node().storage(), instance.node().memory(), randomBoolean() && (instance.node().storage() != null || instance.node().memory() != null) && instance.node().processors() != null ? null : Optional.ofNullable(instance.total().processors()).map(p -> p.plus(randomProcessors())).orElse(randomProcessors()));
        } else {
            // Fallback: rebuild each node metric only where the corresponding
            // total metric exists, keeping at least one metric non-null.
            ByteSizeValue newStorage = instance.total().storage() != null ? randomByteSize(instance.node().memory() != null || instance.node().processors() != null, instance.node().storage()) : null;
            ByteSizeValue newMem = instance.total().memory() != null ? randomByteSize(newStorage != null || instance.node().processors() != null, instance.node().memory()) : null;
            builder.node(newStorage, newMem, randomBoolean() && (newMem != null || newStorage != null) && instance.node().processors() != null ? null : instance.total().processors() != null && randomBoolean() ? Optional.ofNullable(instance.total().processors()).map(p -> p.plus(randomProcessors())).orElse(randomProcessors()) : null);
        }
    }
    return builder.build();
}
291787.8422108elasticsearch
/**
 * Populates this request builder from a JSON/XContent user definition.
 * <p>
 * Parses the top-level object field by field and delegates each recognized
 * field (password, password_hash, roles, full_name, email, metadata, enabled,
 * username) to the corresponding builder setter. Any unrecognized field, or a
 * recognized field with the wrong token type, fails the parse.
 *
 * @param username    the username for the request; must match any {@code username}
 *                    field present in {@code source}
 * @param source      the raw request body
 * @param xContentType the content type of {@code source}; must not be null
 * @param hasher      used to hash the clear-text password or validate a provided hash
 * @return this builder
 * @throws IOException                 on underlying parse failures
 * @throws ElasticsearchParseException on malformed or unexpected fields
 */
public PutUserRequestBuilder source(String username, BytesReference source, XContentType xContentType, Hasher hasher) throws IOException {
    Objects.requireNonNull(xContentType);
    username(username);
    try (XContentParser parser = XContentHelper.createParserNotCompressed(LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, source, xContentType)) {
        XContentUtils.verifyObject(parser);
        XContentParser.Token token;
        String currentFieldName = null;
        // Standard pull-parse loop: remember the field name, then dispatch on it
        // when the value token arrives.
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (User.Fields.PASSWORD.match(currentFieldName, parser.getDeprecationHandler())) {
                if (token == XContentParser.Token.VALUE_STRING) {
                    String password = parser.text();
                    // SecureString zeroes the char[] when closed, limiting how long
                    // the clear-text password lives in memory.
                    try (SecureString securePassword = new SecureString(password.toCharArray())) {
                        password(securePassword, hasher);
                    }
                } else {
                    throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
                }
            } else if (User.Fields.PASSWORD_HASH.match(currentFieldName, parser.getDeprecationHandler())) {
                if (token == XContentParser.Token.VALUE_STRING) {
                    char[] passwordChars = parser.text().toCharArray();
                    passwordHash(passwordChars, hasher);
                } else {
                    throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
                }
            } else if (User.Fields.ROLES.match(currentFieldName, parser.getDeprecationHandler())) {
                // Roles may be given as a comma-delimited string or as an array.
                if (token == XContentParser.Token.VALUE_STRING) {
                    roles(Strings.commaDelimitedListToStringArray(parser.text()));
                } else {
                    roles(XContentUtils.readStringArray(parser, false));
                }
            } else if (User.Fields.FULL_NAME.match(currentFieldName, parser.getDeprecationHandler())) {
                // Explicit null is allowed and simply leaves the field unset.
                if (token == XContentParser.Token.VALUE_STRING) {
                    fullName(parser.text());
                } else if (token != XContentParser.Token.VALUE_NULL) {
                    throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
                }
            } else if (User.Fields.EMAIL.match(currentFieldName, parser.getDeprecationHandler())) {
                if (token == XContentParser.Token.VALUE_STRING) {
                    email(parser.text());
                } else if (token != XContentParser.Token.VALUE_NULL) {
                    throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
                }
            } else if (User.Fields.METADATA.match(currentFieldName, parser.getDeprecationHandler())) {
                if (token == XContentParser.Token.START_OBJECT) {
                    metadata(parser.map());
                } else {
                    throw new ElasticsearchParseException("expected field [{}] to be of type object, but found [{}] instead", currentFieldName, token);
                }
            } else if (User.Fields.ENABLED.match(currentFieldName, parser.getDeprecationHandler())) {
                if (token == XContentParser.Token.VALUE_BOOLEAN) {
                    enabled(parser.booleanValue());
                } else {
                    throw new ElasticsearchParseException("expected field [{}] to be of type boolean, but found [{}] instead", currentFieldName, token);
                }
            } else if (User.Fields.USERNAME.match(currentFieldName, parser.getDeprecationHandler())) {
                if (token == Token.VALUE_STRING) {
                    // A username in the body is redundant; it must agree with the
                    // one passed in explicitly.
                    if (username.equals(parser.text()) == false) {
                        throw new IllegalArgumentException("[username] in source does not match the username provided [" + username + "]");
                    }
                } else {
                    throw new ElasticsearchParseException("expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
                }
            } else {
                throw new ElasticsearchParseException("failed to parse add user request. unexpected field [{}]", currentFieldName);
            }
        }
        return this;
    }
}
291521.371162elasticsearch
/**
 * Runs one EQL query per multi-value-capable function against the bulk-loaded
 * test data and verifies the expected document ids come back. Afterwards it
 * checks that every function registered in {@link EqlFunctionRegistry} was
 * covered by some query.
 */
public void testMultiValueFields() throws Exception {
    // Load and index the multi-value test documents.
    final String bulkEntries = readResource(EqlSearchIT.class.getResourceAsStream("/eql_data.json"));
    final Request bulkRequest = new Request("POST", index + "/_bulk?refresh");
    bulkRequest.setJsonEntity(bulkEntries);
    assertOK(client().performRequest(bulkRequest));
    // Every registered function must end up in testedFunctions by the end.
    final Set<String> availableFunctions = new EqlFunctionRegistry().listFunctions().stream().map(FunctionDefinition::name).collect(Collectors.toSet());
    final Set<String> testedFunctions = new HashSet<>();
    try (RestClient eqlClient = buildClient(restClientSettings(), newNodes.stream().map(TestNode::publishAddress).toArray(HttpHost[]::new))) {
        final String filterPath = "filter_path=hits.events._id";
        final Request searchRequest = new Request("POST", index + "/_eql/search?" + filterPath);
        // String functions.
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "between", "PROCESS where between(process_name, \\\"w\\\", \\\"s\\\") : \\\"indow\\\"", new int[] { 120, 121 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "cidrmatch", "PROCESS where string(cidrmatch(source_address, \\\"10.6.48.157/24\\\")) : \\\"true\\\"", new int[] { 121, 122 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "concat", "PROCESS where concat(file_name, process_name) == \\\"foo\\\" or add(pid, ppid) > 100", new int[] { 116, 117, 120, 121, 122 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "endswith", "PROCESS where string(endswith(process_name, \\\"s\\\")) : \\\"true\\\"", new int[] { 120, 121 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "indexof", "PROCESS where indexof(file_name, \\\"x\\\", 2) > 0", new int[] { 116, 117 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "length", "PROCESS where length(file_name) >= 3 and length(file_name) == 1", new int[] { 116 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "startswith", "PROCESS where string(startswith~(file_name, \\\"F\\\")) : \\\"true\\\"", new int[] { 116, 117, 120, 121 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "string", "PROCESS where string(concat(file_name, process_name) == \\\"foo\\\") : \\\"true\\\"", new int[] { 116, 120 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "stringcontains", "PROCESS where string(stringcontains(file_name, \\\"txt\\\")) : \\\"true\\\"", new int[] { 117 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "substring", "PROCESS where substring(file_name, -4) : \\\".txt\\\"", new int[] { 117 });
        // Arithmetic functions.
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "add", "PROCESS where add(pid, 1) == 2", new int[] { 120, 121, 122 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "divide", "PROCESS where divide(pid, 12) == 1", new int[] { 116, 117, 118, 119, 120, 122 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "modulo", "PROCESS where modulo(ppid, 10) == 0", new int[] { 121, 122 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "multiply", "PROCESS where string(multiply(pid, 10) == 120) == \\\"true\\\"", new int[] { 116, 117, 118, 119, 120, 122 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "number", "PROCESS where number(command_line) + pid >= 360", new int[] { 122, 123 });
        assertMultiValueFunctionQuery(availableFunctions, testedFunctions, searchRequest, eqlClient, "subtract", "PROCESS where subtract(pid, 1) == 0", new int[] { 120, 121, 122 });
    }
    assertTrue(testedFunctions.containsAll(availableFunctions));
}
291860.791147elasticsearch
/**
 * Supplies test cases for the LOG function: the happy path for both the
 * two-argument and constant-base evaluators, plus the out-of-domain cases
 * (non-positive value, base of exactly 1) which must warn and yield null.
 */
public static Iterable<Object[]> parameters() {
    // Warnings emitted when log() is evaluated on an out-of-domain argument.
    final String failedWarning = "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.";
    final List<String> nonPositiveWarnings = List.of(failedWarning, "Line -1:-1: java.lang.ArithmeticException: Log of non-positive number");
    final List<String> baseOneWarnings = List.of(failedWarning, "Line -1:-1: java.lang.ArithmeticException: Log of base 1");
    // Valid two-argument and one-argument (natural log) cases.
    List<TestCaseSupplier> cases = TestCaseSupplier.forBinaryCastingToDouble("LogEvaluator", "base", "value", (b, l) -> Math.log10(l) / Math.log10(b), 2d, Double.POSITIVE_INFINITY, 1d, Double.POSITIVE_INFINITY, List.of());
    cases.addAll(TestCaseSupplier.forUnaryCastingToDouble("LogConstantEvaluator", "value", Math::log, Math.nextUp(1d), Double.POSITIVE_INFINITY, List.of()));
    TestCaseSupplier.forUnaryDouble(cases, "LogConstantEvaluator[value=Attribute[channel=0]]", DataTypes.DOUBLE, Math::log, Math.nextUp(0d), Math.nextDown(1d), List.of());
    // Base and value both strictly inside (0, 1) — still a valid domain.
    cases.addAll(TestCaseSupplier.forBinaryCastingToDouble("LogEvaluator", "base", "value", (b, l) -> Math.log10(l) / Math.log10(b), List.of(new TestCaseSupplier.TypedDataSupplier("<gt0 double>", () -> Math.nextUp(0d), DataTypes.DOUBLE), new TestCaseSupplier.TypedDataSupplier("<lt1 double>", () -> Math.nextDown(1d), DataTypes.DOUBLE)), List.of(new TestCaseSupplier.TypedDataSupplier("<gt0 double>", () -> Math.nextUp(0d), DataTypes.DOUBLE), new TestCaseSupplier.TypedDataSupplier("<lt1 double>", () -> Math.nextDown(1d), DataTypes.DOUBLE)), List.of()));
    // Out-of-domain: non-positive base, non-positive value, and base == 1.
    cases.addAll(TestCaseSupplier.forBinaryCastingToDouble("LogEvaluator", "base", "value", (b, l) -> null, Double.NEGATIVE_INFINITY, 0d, 1d, Double.POSITIVE_INFINITY, nonPositiveWarnings));
    cases.addAll(TestCaseSupplier.forBinaryCastingToDouble("LogEvaluator", "base", "value", (b, l) -> null, 2d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, nonPositiveWarnings));
    cases.addAll(TestCaseSupplier.forBinaryCastingToDouble("LogEvaluator", "base", "value", (b, l) -> null, 1d, 1d, 1d, Double.POSITIVE_INFINITY, baseOneWarnings));
    // Mixed ranges: base in (0, 1) with value > 1, and vice versa.
    cases.addAll(TestCaseSupplier.forBinaryCastingToDouble("LogEvaluator", "base", "value", (b, l) -> Math.log10(l) / Math.log10(b), List.of(new TestCaseSupplier.TypedDataSupplier("<gt0 double>", () -> Math.nextUp(0d), DataTypes.DOUBLE), new TestCaseSupplier.TypedDataSupplier("<lt1 double>", () -> Math.nextDown(1d), DataTypes.DOUBLE)), List.of(new TestCaseSupplier.TypedDataSupplier("<gt1 double>", () -> Math.nextUp(1d), DataTypes.DOUBLE), new TestCaseSupplier.TypedDataSupplier("<largest double>", () -> Double.MAX_VALUE, DataTypes.DOUBLE)), List.of()));
    cases.addAll(TestCaseSupplier.forBinaryCastingToDouble("LogEvaluator", "base", "value", (b, l) -> Math.log10(l) / Math.log10(b), List.of(new TestCaseSupplier.TypedDataSupplier("<gt0 double>", () -> Math.nextUp(1d), DataTypes.DOUBLE), new TestCaseSupplier.TypedDataSupplier("<lt1 double>", () -> Double.MAX_VALUE, DataTypes.DOUBLE)), List.of(new TestCaseSupplier.TypedDataSupplier("<gt1 double>", () -> Math.nextUp(0d), DataTypes.DOUBLE), new TestCaseSupplier.TypedDataSupplier("<largest double>", () -> Math.nextDown(1d), DataTypes.DOUBLE)), List.of()));
    // Out-of-domain value for the constant-base (natural log) evaluator.
    cases.addAll(TestCaseSupplier.forUnaryCastingToDouble("LogConstantEvaluator", "value", v -> null, Double.NEGATIVE_INFINITY, 0d, nonPositiveWarnings));
    cases = anyNullIsNull(true, cases);
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(cases));
}
292527.777128elasticsearch
/**
 * Supplies test cases for the DIV operator across int, long, double and
 * unsigned long, including division-by-zero cases which must warn and
 * produce null.
 */
public static Iterable<Object[]> parameters() {
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // Happy-path cases for int/long/double with widening; the double config
    // maps non-finite results (e.g. x / 0.0) to null, and the trailing lambda
    // predicts the warnings for exactly those double cases.
    suppliers.addAll(TestCaseSupplier.forBinaryWithWidening(new TestCaseSupplier.NumericTypeTestConfigs<Number>(new TestCaseSupplier.NumericTypeTestConfig<>((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1, (l, r) -> l.intValue() / r.intValue(), "DivIntsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1, (l, r) -> l.longValue() / r.longValue(), "DivLongsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, (l, r) -> {
        double v = l.doubleValue() / r.doubleValue();
        if (Double.isFinite(v)) {
            return v;
        }
        return null;
    }, "DivDoublesEvaluator")), "lhs", "rhs", (lhs, rhs) -> {
        if (lhs.type() != DataTypes.DOUBLE || rhs.type() != DataTypes.DOUBLE) {
            return List.of();
        }
        double v = ((Double) lhs.getValue()) / ((Double) rhs.getValue());
        if (Double.isFinite(v)) {
            return List.of();
        }
        return List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.ArithmeticException: / by zero");
    }, false));
    // Happy-path unsigned long division (divisor starts at ONE to avoid zero).
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("DivUnsignedLongsEvaluator", "lhs", "rhs", (l, r) -> (((BigInteger) l).divide((BigInteger) r)), DataTypes.UNSIGNED_LONG, TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE), true), TestCaseSupplier.ulongCases(BigInteger.ONE, BigInteger.valueOf(Long.MAX_VALUE), true), List.of(), false));
    // Wrap the valid cases with null handling and type-error cases before
    // adding the explicit divide-by-zero cases below.
    suppliers = errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), DivTests::divErrorMessageString);
    // Divide-by-zero for every numeric lhs/rhs combination: expected value is
    // always null and a warning is always emitted.
    TestCaseSupplier.NumericTypeTestConfigs<Number> typeStuff = new TestCaseSupplier.NumericTypeTestConfigs<>(new TestCaseSupplier.NumericTypeTestConfig<>((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1, (l, r) -> null, "DivIntsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1, (l, r) -> null, "DivLongsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, (l, r) -> null, "DivDoublesEvaluator"));
    List<DataType> numericTypes = List.of(DataTypes.INTEGER, DataTypes.LONG, DataTypes.DOUBLE);
    for (DataType lhsType : numericTypes) {
        for (DataType rhsType : numericTypes) {
            DataType expected = TestCaseSupplier.widen(lhsType, rhsType);
            TestCaseSupplier.NumericTypeTestConfig<Number> expectedTypeStuff = typeStuff.get(expected);
            BiFunction<DataType, DataType, Matcher<String>> evaluatorToString = (lhs, rhs) -> equalTo(expectedTypeStuff.evaluatorName() + "[" + "lhs" + "=" + TestCaseSupplier.getCastEvaluator("Attribute[channel=0]", lhs, expected) + ", " + "rhs" + "=" + TestCaseSupplier.getCastEvaluator("Attribute[channel=1]", rhs, expected) + "]");
            // rhs suppliers are pinned to the range [0, 0] to force division by zero.
            TestCaseSupplier.casesCrossProduct((l1, r1) -> expectedTypeStuff.expected().apply((Number) l1, (Number) r1), TestCaseSupplier.getSuppliersForNumericType(lhsType, expectedTypeStuff.min(), expectedTypeStuff.max(), true), TestCaseSupplier.getSuppliersForNumericType(rhsType, 0, 0, true), evaluatorToString, (lhs, rhs) -> List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.ArithmeticException: / by zero"), suppliers, expected, false);
        }
    }
    // Unsigned long divide-by-zero.
    suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("DivUnsignedLongsEvaluator", "lhs", "rhs", (l, r) -> null, DataTypes.UNSIGNED_LONG, TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE), true), TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.ZERO, true), List.of("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.ArithmeticException: / by zero"), false));
    return parameterSuppliersFromTypedData(suppliers);
}
291650.01160elasticsearch
/**
 * Supplies test cases for the == operator across every comparable data type:
 * numerics (with widening), unsigned long, boolean, IP, version, date,
 * strings, and the geo/cartesian point and shape types.
 */
public static Iterable<Object[]> parameters() {
    // Numeric comparisons, widening the narrower operand to the wider type.
    List<TestCaseSupplier> cases = new ArrayList<>(TestCaseSupplier.forBinaryComparisonWithWidening(new TestCaseSupplier.NumericTypeTestConfigs<>(new TestCaseSupplier.NumericTypeTestConfig<>((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1, (l, r) -> l.intValue() == r.intValue(), "EqualsIntsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1, (l, r) -> l.longValue() == r.longValue(), "EqualsLongsEvaluator"), new TestCaseSupplier.NumericTypeTestConfig<>(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, (l, r) -> l.doubleValue() == r.doubleValue(), "EqualsDoublesEvaluator")), "lhs", "rhs", (lhs, rhs) -> List.of(), false));
    // Unsigned long and boolean.
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsLongsEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.ulongCases(BigInteger.ZERO, NumericUtils.UNSIGNED_LONG_MAX, true), TestCaseSupplier.ulongCases(BigInteger.ZERO, NumericUtils.UNSIGNED_LONG_MAX, true), List.of(), false));
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsBoolsEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.booleanCases(), TestCaseSupplier.booleanCases(), List.of(), false));
    // IP, version and date values compare via the keyword/long evaluators.
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsKeywordsEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.ipCases(), TestCaseSupplier.ipCases(), List.of(), false));
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsKeywordsEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.versionCases(""), TestCaseSupplier.versionCases(""), List.of(), false));
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsLongsEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), List.of(), false));
    // Keyword/text string comparisons.
    cases.addAll(TestCaseSupplier.stringCases(Object::equals, (lhsType, rhsType) -> "EqualsKeywordsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", List.of(), DataTypes.BOOLEAN));
    // Spatial types: geo and cartesian, points and shapes.
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsGeometriesEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.geoPointCases(), TestCaseSupplier.geoPointCases(), List.of(), false));
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsGeometriesEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.geoShapeCases(), TestCaseSupplier.geoShapeCases(), List.of(), false));
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsGeometriesEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.cartesianPointCases(), TestCaseSupplier.cartesianPointCases(), List.of(), false));
    cases.addAll(TestCaseSupplier.forBinaryNotCasting("EqualsGeometriesEvaluator", "lhs", "rhs", Object::equals, DataTypes.BOOLEAN, TestCaseSupplier.cartesianShapeCases(), TestCaseSupplier.cartesianShapeCases(), List.of(), false));
    return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases), AbstractFunctionTestCase::errorMessageStringForBinaryOperators));
}
293113.431126elasticsearch
/**
 * Verifies that {@code SearchService#canMatch} pre-filters shard search requests against the
 * indexed "field" date range both before and after the index is frozen, and that requests
 * referencing an open (or already freed) reader context behave correctly. The repeated
 * 9/11-argument {@link ShardSearchRequest} constructions are factored into helpers.
 */
public void testCanMatch() throws IOException {
    createIndex("index");
    prepareIndex("index").setId("1").setSource("field", "2010-01-05T02:00").setRefreshPolicy(IMMEDIATE).get();
    prepareIndex("index").setId("2").setSource("field", "2010-01-06T02:00").setRefreshPolicy(IMMEDIATE).get();
    {
        // Phase 1: regular (not search-throttled) index.
        IndicesService indexServices = getInstanceFromNode(IndicesService.class);
        Index index = resolveIndex("index");
        IndexService indexService = indexServices.indexServiceSafe(index);
        IndexShard shard = indexService.getShard(0);
        assertFalse(indexService.getIndexSettings().isSearchThrottled());
        SearchService searchService = getInstanceFromNode(SearchService.class);
        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
        // A request with no source always matches.
        assertTrue(searchService.canMatch(shardRequest(searchRequest, shard)).canMatch());
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        searchRequest.source(sourceBuilder);
        // Range (with date math) that covers the two indexed documents: matches.
        sourceBuilder.query(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d"));
        assertTrue(searchService.canMatch(shardRequest(searchRequest, shard)).canMatch());
        // Exclusive range strictly after both documents: cannot match.
        sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00"));
        assertFalse(searchService.canMatch(shardRequest(searchRequest, shard)).canMatch());
    }
    assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("index")).actionGet());
    {
        // Phase 2: frozen index — search-throttled, canMatch must still work the same way.
        IndicesService indexServices = getInstanceFromNode(IndicesService.class);
        Index index = resolveIndex("index");
        IndexService indexService = indexServices.indexServiceSafe(index);
        IndexShard shard = indexService.getShard(0);
        assertTrue(indexService.getIndexSettings().isSearchThrottled());
        SearchService searchService = getInstanceFromNode(SearchService.class);
        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
        assertTrue(searchService.canMatch(shardRequest(searchRequest, shard)).canMatch());
        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
        sourceBuilder.query(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d"));
        searchRequest.source(sourceBuilder);
        assertTrue(searchService.canMatch(shardRequest(searchRequest, shard)).canMatch());
        sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00"));
        assertFalse(searchService.canMatch(shardRequest(searchRequest, shard)).canMatch());
        // canMatch on the frozen index must not have triggered any refresh.
        IndicesStatsResponse response = indicesAdmin().prepareStats("index").clear().setRefresh(true).get();
        assertEquals(0, response.getTotal().refresh.getTotal());
        PlainActionFuture<ShardSearchContextId> openContextFuture = new PlainActionFuture<>();
        searchService.openReaderContext(shard.shardId(), TimeValue.timeValueSeconds(60), openContextFuture);
        final ShardSearchContextId contextId = openContextFuture.actionGet(TimeValue.timeValueSeconds(60));
        assertNotNull(contextId.getSearcherId());
        sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00"));
        assertFalse(searchService.canMatch(shardRequest(searchRequest, shard, contextId)).canMatch());
        assertTrue(searchService.freeReaderContext(contextId));
        // A freed context that still carries a searcher id can be answered without failing.
        sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00"));
        assertFalse(searchService.canMatch(shardRequest(searchRequest, shard, contextId)).canMatch());
        expectThrows(SearchContextMissingException.class, () -> {
            // Without a searcher id a missing reader context is a hard failure.
            ShardSearchContextId withoutCommitId = new ShardSearchContextId(contextId.getSessionId(), contextId.getId(), null);
            sourceBuilder.query(QueryBuilders.rangeQuery("field").gt("2010-01-06T02:00").lt("2010-01-07T02:00"));
            assertFalse(searchService.canMatch(shardRequest(searchRequest, shard, withoutCommitId)).canMatch());
        });
    }
}

/** Builds a single-shard can-match request (shard 0 of 1) with no reader context. */
private static ShardSearchRequest shardRequest(SearchRequest searchRequest, IndexShard shard) {
    return new ShardSearchRequest(OriginalIndices.NONE, searchRequest, shard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null);
}

/** Builds a single-shard can-match request bound to the given reader context id. */
private static ShardSearchRequest shardRequest(SearchRequest searchRequest, IndexShard shard, ShardSearchContextId contextId) {
    return new ShardSearchRequest(OriginalIndices.NONE, searchRequest, shard.shardId(), 0, 1, AliasFilter.EMPTY, 1f, -1, null, contextId, null);
}
292847.721134elasticsearch
/**
 * Stores a multi-class classification model (with one-hot encoding of "other.categorical")
 * and verifies inference over two batches of ingest documents: the predicted class per
 * document, and the ordering of the top-3 classes when requested.
 *
 * The documents were previously built with double-brace initialization
 * ({@code new HashMap<>() {{ ... }}}), which creates an anonymous HashMap subclass per map
 * and captures a reference to the enclosing test instance; they are now built by a plain
 * helper producing ordinary mutable HashMaps.
 */
public void testInferModelMultiClassModel() throws Exception {
    String modelId = "test-load-models-classification-multi";
    Map<String, String> oneHotEncoding = new HashMap<>();
    oneHotEncoding.put("cat", "animal_cat");
    oneHotEncoding.put("dog", "animal_dog");
    TrainedModelConfig config = buildTrainedModelConfigBuilder(modelId).setInput(new TrainedModelInput(Arrays.asList("field.foo", "field.bar", "other.categorical"))).setParsedDefinition(new TrainedModelDefinition.Builder().setPreProcessors(Arrays.asList(new OneHotEncoding("other.categorical", oneHotEncoding, false))).setTrainedModel(buildMultiClassClassification())).setVersion(MlConfigVersion.CURRENT).setLicenseLevel(License.OperationMode.PLATINUM.description()).setCreateTime(Instant.now()).setEstimatedOperations(0).setModelSize(0).build();
    AtomicReference<Boolean> putConfigHolder = new AtomicReference<>();
    AtomicReference<Exception> exceptionHolder = new AtomicReference<>();
    blockingCall(listener -> trainedModelProvider.storeTrainedModel(config, listener), putConfigHolder, exceptionHolder);
    assertThat(putConfigHolder.get(), is(true));
    assertThat(exceptionHolder.get(), is(nullValue()));
    List<Map<String, Object>> toInfer = new ArrayList<>();
    toInfer.add(inferenceDoc(1.0, 0.5, "dog"));
    toInfer.add(inferenceDoc(0.9, 1.5, "cat"));
    List<Map<String, Object>> toInfer2 = new ArrayList<>();
    toInfer2.add(inferenceDoc(0.0, 0.01, "dog"));
    toInfer2.add(inferenceDoc(1.0, 0.0, "cat"));
    // First batch: expected winning classes per document.
    InferModelAction.Request request = InferModelAction.Request.forIngestDocs(modelId, toInfer, ClassificationConfigUpdate.EMPTY_PARAMS, true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    InferModelAction.Response response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    assertThat(response.getInferenceResults().stream().map(i -> ((SingleValueInferenceResults) i).valueAsString()).collect(Collectors.toList()), contains("option_0", "option_2"));
    // Second batch.
    request = InferModelAction.Request.forIngestDocs(modelId, toInfer2, ClassificationConfigUpdate.EMPTY_PARAMS, true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    assertThat(response.getInferenceResults().stream().map(i -> ((SingleValueInferenceResults) i).valueAsString()).collect(Collectors.toList()), contains("option_2", "option_0"));
    // First batch again, this time asking for the top 3 classes and checking their order.
    request = InferModelAction.Request.forIngestDocs(modelId, toInfer, new ClassificationConfigUpdate(3, null, null, null, null), true, InferModelAction.Request.DEFAULT_TIMEOUT_FOR_INGEST);
    response = client().execute(InferModelAction.INSTANCE, request).actionGet();
    ClassificationInferenceResults classificationInferenceResults = (ClassificationInferenceResults) response.getInferenceResults().get(0);
    assertThat(classificationInferenceResults.getTopClasses().get(0).getClassification(), equalTo("option_0"));
    assertThat(classificationInferenceResults.getTopClasses().get(1).getClassification(), equalTo("option_2"));
    assertThat(classificationInferenceResults.getTopClasses().get(2).getClassification(), equalTo("option_1"));
    classificationInferenceResults = (ClassificationInferenceResults) response.getInferenceResults().get(1);
    assertThat(classificationInferenceResults.getTopClasses().get(0).getClassification(), equalTo("option_2"));
    assertThat(classificationInferenceResults.getTopClasses().get(1).getClassification(), equalTo("option_0"));
    assertThat(classificationInferenceResults.getTopClasses().get(2).getClassification(), equalTo("option_1"));
}

/**
 * Builds a mutable ingest document of the shape
 * {@code {"field": {"foo": foo, "bar": bar}, "other": {"categorical": categorical}}}.
 */
private static Map<String, Object> inferenceDoc(double foo, double bar, String categorical) {
    Map<String, Object> field = new HashMap<>();
    field.put("foo", foo);
    field.put("bar", bar);
    Map<String, Object> other = new HashMap<>();
    other.put("categorical", categorical);
    Map<String, Object> doc = new HashMap<>();
    doc.put("field", field);
    doc.put("other", other);
    return doc;
}
292061.8210132elasticsearch
/**
 * Flips the ML [upgrade_mode] flag in the cluster state and coordinates the follow-up work.
 * When enabling: datafeeds are isolated, ML persistent tasks are unassigned, then we wait for
 * the corresponding child tasks to complete. When disabling: we wait for ML persistent tasks
 * to leave the AWAITING_UPGRADE assignment. Only one request may be in flight at a time.
 *
 * NOTE(review): the listeners below are declared in reverse execution order — the cluster
 * state update runs first, then (enable path) datafeed isolation, task unassignment and the
 * wait for child tasks.
 */
protected void masterOperation(Task task, SetUpgradeModeAction.Request request, ClusterState state, ActionListener<AcknowledgedResponse> listener) throws Exception {
    // Serialize requests: reject with 429 if a previous upgrade-mode change is still running.
    if (isRunning.compareAndSet(false, true) == false) {
        String msg = "Attempted to set [upgrade_mode] to [" + request.isEnabled() + "] from [" + MlMetadata.getMlMetadata(state).isUpgradeMode() + "] while previous request was processing.";
        logger.info(msg);
        Exception detail = new IllegalStateException(msg);
        listener.onFailure(new ElasticsearchStatusException("Cannot change [upgrade_mode]. Previous request is still being processed.", RestStatus.TOO_MANY_REQUESTS, detail));
        return;
    }
    // No-op if the requested value already matches the current cluster state; release the guard.
    if (request.isEnabled() == MlMetadata.getMlMetadata(state).isUpgradeMode()) {
        logger.info("Upgrade mode noop");
        isRunning.set(false);
        listener.onResponse(AcknowledgedResponse.TRUE);
        return;
    }
    logger.info("Starting to set [upgrade_mode] to [" + request.isEnabled() + "] from [" + MlMetadata.getMlMetadata(state).isUpgradeMode() + "]");
    // Wrap the caller's listener so the isRunning guard is always released on completion,
    // whether the request succeeds or fails.
    ActionListener<AcknowledgedResponse> wrappedListener = ActionListener.wrap(r -> {
        logger.info("Completed upgrade mode request");
        isRunning.set(false);
        listener.onResponse(r);
    }, e -> {
        logger.info("Completed upgrade mode request but with failure", e);
        isRunning.set(false);
        listener.onFailure(e);
    });
    final PersistentTasksCustomMetadata tasksCustomMetadata = state.metadata().custom(PersistentTasksCustomMetadata.TYPE);
    // After the persistent tasks are unassigned, wait for their child tasks ("<task>[c]")
    // to finish; node failures while waiting are logged and then rethrown.
    ActionListener<List<PersistentTask<?>>> unassignPersistentTasksListener = ActionListener.wrap(unassignedPersistentTasks -> {
        client.admin().cluster().prepareListTasks().setActions(ML_TASK_NAMES.stream().map(taskName -> taskName + "[c]").toArray(String[]::new)).setWaitForCompletion(true).setTimeout(request.ackTimeout()).execute(ActionListener.wrap(r -> {
            try {
                logger.info("Waited for tasks to be unassigned");
                if (r.getNodeFailures().isEmpty() == false) {
                    logger.info("There were node failures waiting for tasks", r.getNodeFailures().get(0));
                }
                rethrowAndSuppress(r.getNodeFailures());
                wrappedListener.onResponse(AcknowledgedResponse.TRUE);
            } catch (ElasticsearchException ex) {
                logger.info("Caught node failures waiting for tasks to be unassigned", ex);
                wrappedListener.onFailure(ex);
            }
        }, wrappedListener::onFailure));
    }, wrappedListener::onFailure);
    // Once datafeeds are isolated, proceed to unassign the ML persistent tasks.
    ActionListener<List<IsolateDatafeedAction.Response>> isolateDatafeedListener = ActionListener.wrap(isolatedDatafeeds -> {
        logger.info("Isolated the datafeeds");
        unassignPersistentTasks(tasksCustomMetadata, unassignPersistentTasksListener);
    }, wrappedListener::onFailure);
    // Reacts to the cluster state update: short-circuits when there are no ML tasks,
    // otherwise branches on enable (isolate datafeeds) vs disable (wait for reassignment).
    ActionListener<AcknowledgedResponse> clusterStateUpdateListener = ActionListener.wrap(acknowledgedResponse -> {
        if (acknowledgedResponse.isAcknowledged() == false) {
            logger.info("Cluster state update is NOT acknowledged");
            wrappedListener.onFailure(new ElasticsearchTimeoutException("Unknown error occurred while updating cluster state"));
            return;
        }
        if (tasksCustomMetadata == null || tasksCustomMetadata.tasks().isEmpty()) {
            logger.info("No tasks to worry about after state update");
            wrappedListener.onResponse(AcknowledgedResponse.TRUE);
            return;
        }
        if (request.isEnabled()) {
            logger.info("Enabling upgrade mode, must isolate datafeeds");
            isolateDatafeeds(tasksCustomMetadata, isolateDatafeedListener);
        } else {
            logger.info("Disabling upgrade mode, must wait for tasks to not have AWAITING_UPGRADE assignment");
            persistentTasksService.waitForPersistentTasksCondition(persistentTasksCustomMetadata -> persistentTasksCustomMetadata.tasks().stream().noneMatch(t -> ML_TASK_NAMES.contains(t.getTaskName()) && t.getAssignment().equals(AWAITING_UPGRADE)), request.ackTimeout(), ActionListener.wrap(r -> {
                logger.info("Done waiting for tasks to be out of AWAITING_UPGRADE");
                wrappedListener.onResponse(AcknowledgedResponse.TRUE);
            }, wrappedListener::onFailure));
        }
    }, wrappedListener::onFailure);
    // Kick off the chain: write the new upgrade-mode flag into MlMetadata.
    submitUnbatchedTask("ml-set-upgrade-mode", new AckedClusterStateUpdateTask(request, clusterStateUpdateListener) {

        @Override
        protected AcknowledgedResponse newResponse(boolean acknowledged) {
            logger.trace("Cluster update response built: " + acknowledged);
            return AcknowledgedResponse.of(acknowledged);
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            logger.trace("Executing cluster state update");
            MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metadata().custom(MlMetadata.TYPE));
            builder.isUpgradeMode(request.isEnabled());
            ClusterState.Builder newState = ClusterState.builder(currentState);
            newState.metadata(Metadata.builder(currentState.getMetadata()).putCustom(MlMetadata.TYPE, builder.build()).build());
            return newState.build();
        }
    });
}
292309.7612121elasticsearch
/**
 * Stops the given started/stopping datafeeds. Started datafeeds whose task is properly
 * assigned are stopped via the node-level task action (request is routed to their executor
 * nodes); datafeeds needing reassignment have their persistent task removed directly. After
 * the stop completes, waits for all the tasks to disappear, cleans up tasks of datafeeds that
 * moved nodes mid-stop, and refreshes the affected jobs' results indices. Retries on
 * node-level failures up to MAX_ATTEMPTS.
 */
private void normalStopDatafeed(Task task, StopDatafeedAction.Request request, ActionListener<StopDatafeedAction.Response> listener, PersistentTasksCustomMetadata tasks, DiscoveryNodes nodes, List<String> startedDatafeeds, List<String> stoppingDatafeeds, int attempt) {
    final Set<String> executorNodes = new HashSet<>();
    final List<String> startedDatafeedsJobs = new ArrayList<>();
    final List<String> resolvedStartedDatafeeds = new ArrayList<>();
    final List<PersistentTasksCustomMetadata.PersistentTask<?>> allDataFeedsToWaitFor = new ArrayList<>();
    for (String datafeedId : startedDatafeeds) {
        PersistentTasksCustomMetadata.PersistentTask<?> datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks);
        if (datafeedTask == null) {
            // Should never happen: fail hard in tests (assertions enabled), log in production.
            String msg = "Requested datafeed [" + datafeedId + "] be stopped, but datafeed's task could not be found.";
            assert datafeedTask != null : msg;
            logger.error(msg);
        } else if (PersistentTasksClusterService.needsReassignment(datafeedTask.getAssignment(), nodes) == false) {
            // Task is assigned to a live node: stop it via the task action on that node.
            startedDatafeedsJobs.add(((StartDatafeedAction.DatafeedParams) datafeedTask.getParams()).getJobId());
            resolvedStartedDatafeeds.add(datafeedId);
            executorNodes.add(datafeedTask.getExecutorNode());
            allDataFeedsToWaitFor.add(datafeedTask);
        } else {
            // Unassigned task: remove the persistent task directly (best effort).
            persistentTasksService.sendRemoveRequest(datafeedTask.getId(), null, ActionListener.wrap(r -> auditDatafeedStopped(datafeedTask), e -> logger.error("[" + datafeedId + "] failed to remove task to stop unassigned datafeed", e)));
            allDataFeedsToWaitFor.add(datafeedTask);
        }
    }
    for (String datafeedId : stoppingDatafeeds) {
        PersistentTasksCustomMetadata.PersistentTask<?> datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks);
        assert datafeedTask != null : "Requested datafeed [" + datafeedId + "] be stopped, but datafeed's task could not be found.";
        allDataFeedsToWaitFor.add(datafeedTask);
    }
    request.setResolvedStartedDatafeedIds(resolvedStartedDatafeeds.toArray(new String[0]));
    request.setNodes(executorNodes.toArray(new String[0]));
    // Populated (by waitForDatafeedStopped) with datafeeds that relocated while stopping.
    final Set<String> movedDatafeeds = ConcurrentCollections.newConcurrentSet();
    ActionListener<StopDatafeedAction.Response> finalListener = ActionListener.wrap(response -> waitForDatafeedStopped(allDataFeedsToWaitFor, request, response, ActionListener.wrap(finished -> {
        // Clean up persistent tasks of datafeeds that moved to another node mid-stop;
        // ResourceNotFound just means the task is already gone.
        for (String datafeedId : movedDatafeeds) {
            PersistentTasksCustomMetadata.PersistentTask<?> datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks);
            persistentTasksService.sendRemoveRequest(datafeedTask.getId(), null, ActionListener.wrap(r -> auditDatafeedStopped(datafeedTask), e -> {
                if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) {
                    logger.debug("[{}] relocated datafeed task already removed", datafeedId);
                } else {
                    logger.error("[" + datafeedId + "] failed to remove task to stop relocated datafeed", e);
                }
            }));
        }
        if (startedDatafeedsJobs.isEmpty()) {
            listener.onResponse(finished);
            return;
        }
        // Refresh the jobs' results indices; a refresh failure is logged but not fatal.
        client.admin().indices().prepareRefresh(startedDatafeedsJobs.stream().map(AnomalyDetectorsIndex::jobResultsAliasedName).toArray(String[]::new)).execute(ActionListener.wrap(_unused -> listener.onResponse(finished), ex -> {
            logger.warn(() -> format("failed to refresh job [%s] results indices when stopping datafeeds [%s]", startedDatafeedsJobs, startedDatafeeds), ex);
            listener.onResponse(finished);
        }));
    }, listener::onFailure), movedDatafeeds), e -> {
        // Failure path: retry node failures immediately, retry "insufficient responses"
        // with a linear backoff; everything else propagates to the caller.
        Throwable unwrapped = ExceptionsHelper.unwrapCause(e);
        if (unwrapped instanceof FailedNodeException) {
            if (attempt <= MAX_ATTEMPTS) {
                logger.warn("Node [{}] failed while processing stop datafeed request - retrying", ((FailedNodeException) unwrapped).nodeId());
                doExecute(task, request, listener, attempt + 1);
            } else {
                listener.onFailure(e);
            }
        } else if (unwrapped instanceof RetryStopDatafeedException) {
            if (attempt <= MAX_ATTEMPTS) {
                logger.info("Insufficient responses while processing stop datafeed request [{}] - retrying", unwrapped.getMessage());
                threadPool.schedule(() -> doExecute(task, request, listener, attempt + 1), TimeValue.timeValueMillis(100L * attempt), EsExecutors.DIRECT_EXECUTOR_SERVICE);
            } else {
                listener.onFailure(ExceptionsHelper.serverError("Failed to stop datafeed [" + request.getDatafeedId() + "] after " + MAX_ATTEMPTS + " due to inconsistencies between local and persistent tasks within the cluster"));
            }
        } else {
            listener.onFailure(e);
        }
    });
    super.doExecute(task, request, finalListener);
}
292634.497126elasticsearch
/**
 * Runs the reindexing step of a data frame analytics job: resolves (or creates) the
 * destination index, then reindexes the source into it, tagging each document with an
 * incremental id via a script. The listeners below are declared in reverse execution order:
 * destIndexListener -> copyIndexCreatedListener -> reindexCompletedListener. Responds with
 * {@code StepResponse(true)} whenever the task is stopping mid-step.
 */
protected void doExecute(ActionListener<StepResponse> listener) {
    task.getStatsHolder().getProgressTracker().updateReindexingProgress(1);
    final ParentTaskAssigningClient parentTaskClient = parentTaskClient();
    // Final stage: handles completion (or failure) of the reindex itself.
    ActionListener<BulkByScrollResponse> reindexCompletedListener = ActionListener.wrap(reindexResponse -> {
        if (isTaskStopping()) {
            LOGGER.debug("[{}] task is stopping. Stopping reindexing before it is finished.", config.getId());
            listener.onResponse(new StepResponse(true));
            return;
        }
        // Clear the tracked reindex task id now that the child task has finished.
        synchronized (this) {
            reindexingTaskId = null;
        }
        Exception reindexError = getReindexError(config.getId(), reindexResponse);
        if (reindexError != null) {
            listener.onFailure(reindexError);
            return;
        }
        auditor.info(config.getId(), Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_FINISHED_REINDEXING, config.getDest().getIndex(), reindexResponse.getTook()));
        isReindexingFinished = true;
        task.getStatsHolder().getProgressTracker().updateReindexingProgress(100);
        LOGGER.debug("[{}] Reindex completed; created [{}]; retries [{}]", config.getId(), reindexResponse.getCreated(), reindexResponse.getBulkRetries());
        listener.onResponse(new StepResponse(false));
    }, error -> {
        // A cancellation caused by the task stopping is expected, not an error.
        if (isTaskStopping() && isTaskCancelledException(error)) {
            LOGGER.debug(() -> "[" + config.getId() + "] Caught task cancelled exception while task is stopping", error);
            listener.onResponse(new StepResponse(true));
        } else {
            listener.onFailure(error);
        }
    });
    // Middle stage: destination index is ready — build and launch the reindex request.
    ActionListener<CreateIndexResponse> copyIndexCreatedListener = ActionListener.wrap(createIndexResponse -> {
        ReindexRequest reindexRequest = new ReindexRequest();
        reindexRequest.setRefresh(true);
        reindexRequest.setSourceIndices(config.getSource().getIndex());
        reindexRequest.setSourceQuery(config.getSource().getParsedQuery());
        reindexRequest.getSearchRequest().allowPartialSearchResults(false);
        reindexRequest.getSearchRequest().source().fetchSource(config.getSource().getSourceFiltering());
        // Sort by sequence number for a deterministic document order.
        reindexRequest.getSearchRequest().source().sort(SeqNoFieldMapper.NAME, SortOrder.ASC);
        reindexRequest.setDestIndex(config.getDest().getIndex());
        reindexRequest.setSlices(1);
        // Script state: a shared counter (starting at -1, pre-incremented) used to stamp
        // each document with a unique incremental id.
        Map<String, Object> counterValueParam = new HashMap<>();
        counterValueParam.put("value", -1);
        reindexRequest.setScript(new Script(Script.DEFAULT_SCRIPT_TYPE, Script.DEFAULT_SCRIPT_LANG, "ctx._source." + DestinationIndex.INCREMENTAL_ID + " = ++params.counter.value", Collections.singletonMap("counter", counterValueParam)));
        reindexRequest.setParentTask(getParentTaskId());
        // Run the reindex under the ML origin but preserve the caller's thread context
        // for the completion listener.
        final ThreadContext threadContext = parentTaskClient.threadPool().getThreadContext();
        final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
        try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(ML_ORIGIN)) {
            // Check isTaskStopping() and record the child task id atomically so a
            // concurrent stop cannot race with the reindex launch.
            synchronized (this) {
                if (isTaskStopping()) {
                    LOGGER.debug("[{}] task is stopping. Stopping reindexing before it is finished.", config.getId());
                    listener.onResponse(new StepResponse(true));
                    return;
                }
                LOGGER.info("[{}] Started reindexing", config.getId());
                Task reindexTask = client.executeLocally(ReindexAction.INSTANCE, reindexRequest, new ContextPreservingActionListener<>(supplier, reindexCompletedListener));
                reindexingTaskId = reindexTask.getId();
            }
            auditor.info(config.getId(), Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_STARTED_REINDEXING, config.getDest().getIndex()));
        }
    }, reindexCompletedListener::onFailure);
    // First stage: reuse an existing destination index (updating its mappings) or create it
    // when the GET reports IndexNotFound.
    ActionListener<GetIndexResponse> destIndexListener = ActionListener.wrap(indexResponse -> {
        auditor.info(config.getId(), Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_REUSING_DEST_INDEX, indexResponse.indices()[0]));
        LOGGER.info("[{}] Using existing destination index [{}]", config.getId(), indexResponse.indices()[0]);
        DestinationIndex.updateMappingsToDestIndex(parentTaskClient, config, indexResponse, ActionListener.wrap(acknowledgedResponse -> copyIndexCreatedListener.onResponse(null), copyIndexCreatedListener::onFailure));
    }, e -> {
        if (ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) {
            auditor.info(config.getId(), Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_CREATING_DEST_INDEX, config.getDest().getIndex()));
            LOGGER.info("[{}] Creating destination index [{}]", config.getId(), config.getDest().getIndex());
            DestinationIndex.createDestinationIndex(parentTaskClient, Clock.systemUTC(), config, destIndexAllowedSettings, copyIndexCreatedListener);
        } else {
            copyIndexCreatedListener.onFailure(e);
        }
    });
    // Kick off the chain with a GET on the destination index under the job's headers.
    ClientHelper.executeWithHeadersAsync(config.getHeaders(), ML_ORIGIN, parentTaskClient, GetIndexAction.INSTANCE, new GetIndexRequest().indices(config.getDest().getIndex()), destIndexListener);
}
292816.182124elasticsearch
/**
 * Walks a pruned transaction store with an {@code ItemSetTraverser} and verifies the
 * traversal order, item-set sizes, and the item-set / parent-item-set bit sets before and
 * after successive {@code prune()} calls, until the traversal is exhausted.
 */
public void testPruning() throws IOException {
    transactionStore = new HashBasedTransactionStore(mockBigArrays());
    Field keywordField = createKeywordFieldTestInstance("field", 0);
    transactionStore.add(Stream.of(tuple(keywordField, List.of("a", "d", "f")), tuple(keywordField, List.of("a", "c", "d", "e")), tuple(keywordField, List.of("b", "d")), tuple(keywordField, List.of("b", "c", "d")), tuple(keywordField, List.of("b", "c")), tuple(keywordField, List.of("a", "b", "d")), tuple(keywordField, List.of("b", "d", "e")), tuple(keywordField, List.of("b", "c", "e", "g")), tuple(keywordField, List.of("c", "d", "f")), tuple(keywordField, List.of("a", "b", "d"))));
    transactionStore.prune(0.1);
    try (TopItemIds ids = transactionStore.getTopItemIds()) {
        ItemSetTraverser traverser = new ItemSetTraverser(ids);
        // Items are visited in descending-frequency order: d, b, c, a, e.
        assertTrue(traverser.next());
        assertEquals("d", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(1, traverser.getNumberOfItems());
        assertTrue(traverser.getItemSetBitSet().get(1));
        assertTrue(traverser.next());
        assertEquals("b", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(2, traverser.getNumberOfItems());
        assertTrue(traverser.next());
        assertEquals("c", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(3, traverser.getNumberOfItems());
        assertTrue(traverser.next());
        assertEquals("a", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(4, traverser.getNumberOfItems());
        assertTrue(traverser.next());
        assertEquals("e", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(5, traverser.getNumberOfItems());
        // Current set holds bits 1..5; the parent set lacks the newest item (bit 5).
        for (int bit = 1; bit <= 5; bit++) {
            assertTrue(traverser.getItemSetBitSet().get(bit));
        }
        for (int bit = 1; bit <= 4; bit++) {
            assertTrue(traverser.getParentItemSetBitSet().get(bit));
        }
        assertFalse(traverser.getParentItemSetBitSet().get(5));
        traverser.prune();
        // After pruning, bit 5 ("e") is dropped and the next step adds "f" (bit 6).
        assertTrue(traverser.next());
        for (int bit = 1; bit <= 4; bit++) {
            assertTrue(traverser.getItemSetBitSet().get(bit));
        }
        assertFalse(traverser.getItemSetBitSet().get(5));
        assertTrue(traverser.getItemSetBitSet().get(6));
        for (int bit = 1; bit <= 4; bit++) {
            assertTrue(traverser.getParentItemSetBitSet().get(bit));
        }
        assertFalse(traverser.getParentItemSetBitSet().get(5));
        assertFalse(traverser.getParentItemSetBitSet().get(6));
        assertEquals("f", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(5, traverser.getNumberOfItems());
        assertTrue(traverser.next());
        assertEquals("g", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(6, traverser.getNumberOfItems());
        traverser.prune();
        assertTrue(traverser.next());
        assertEquals("g", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(5, traverser.getNumberOfItems());
        assertTrue(traverser.next());
        assertEquals("e", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(4, traverser.getNumberOfItems());
        traverser.prune();
        assertTrue(traverser.next());
        assertEquals("f", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(4, traverser.getNumberOfItems());
        // Set is now {d, b, c, f}: bits 1..3 and 6; parent set is {d, b, c}.
        for (int bit = 1; bit <= 3; bit++) {
            assertTrue(traverser.getItemSetBitSet().get(bit));
        }
        assertFalse(traverser.getItemSetBitSet().get(4));
        assertFalse(traverser.getItemSetBitSet().get(5));
        assertTrue(traverser.getItemSetBitSet().get(6));
        for (int bit = 1; bit <= 3; bit++) {
            assertTrue(traverser.getParentItemSetBitSet().get(bit));
        }
        for (int bit = 4; bit <= 6; bit++) {
            assertFalse(traverser.getParentItemSetBitSet().get(bit));
        }
        assertTrue(traverser.next());
        assertEquals("g", transactionStore.getItem(traverser.getItemId()).v2());
        assertEquals(5, traverser.getNumberOfItems());
        // Repeated pruning exhausts the traversal; nothing remains to visit.
        for (int i = 0; i < 7; i++) {
            traverser.prune();
        }
        int remainingSteps = 0;
        while (traverser.next()) {
            ++remainingSteps;
        }
        assertEquals(0, remainingSteps);
    }
}
294536.01117elasticsearch
/**
 * Tests {@code PreserveAllAllocations}: nodes returned by {@code nodesPreservingAllocations()}
 * must have the memory and cores consumed by every existing allocation already deducted,
 * deployments returned by {@code modelsPreservingAllocations()} must have all preserved
 * allocations removed from their remaining allocation budget (and zeroed in
 * {@code currentAllocationsByNodeId}), and {@code mergePreservedAllocations(plan)} must add
 * the preserved allocations back on top of a freshly computed plan.
 * The first scenario uses zero per-deployment/per-allocation memory overhead; the second
 * repeats it with 300MB per-deployment and 10MB per-allocation overheads, which changes
 * only the expected remaining-memory figures.
 */
public void testGivenPreviousAssignments() {
    {
        // Scenario 1: no per-deployment or per-allocation memory overhead (last two ctor args are 0).
        Node node1 = new Node("n_1", ByteSizeValue.ofMb(640).getBytes(), 8);
        Node node2 = new Node("n_2", ByteSizeValue.ofMb(640).getBytes(), 8);
        // m_1: 1 allocation of 1 thread currently on n_1; m_2: 1 allocation on n_1 and 2 on n_2, 4 threads each.
        Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, 0, 0);
        Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0);
        PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node1, node2), List.of(deployment1, deployment2));
        // Preserved allocations consume node resources up front:
        // n_1 loses 1x1 + 1x4 = 5 cores (8 -> 3); n_2 loses 2x4 = 8 cores (8 -> 0).
        List<Node> nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations();
        assertThat(nodesPreservingAllocations, hasSize(2));
        assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1"));
        assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(0L));
        assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3));
        assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2"));
        assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0));
        // Deployments keep only the allocations that still need placing:
        // m_1 2-1=1 remaining, m_2 6-3=3 remaining; preserved node counts are zeroed.
        List<AssignmentPlan.Deployment> modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations();
        assertThat(modelsPreservingAllocations, hasSize(2));
        assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
        assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
        assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
        assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3));
        assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4));
        assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0)));
        // A new plan assigns 2 fresh allocations of m_1 to n_1 and nothing for m_2...
        AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)).assignModelToNode(deployment1, node1, 2).build();
        assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 2)));
        assertThat(plan.assignments(deployment2), isEmpty());
        // ...merging restores the preserved allocations on top: m_1 2+1=3 on n_1, m_2 back to n_1:1, n_2:2.
        plan = preserveAllAllocations.mergePreservedAllocations(plan);
        assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 3)));
        assertThat(plan.assignments(deployment2), isPresentWith(Map.of(node1, 1, node2, 2)));
        assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(0L));
        assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1));
        assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0));
    }
    {
        // Scenario 2: same layout, but deployments carry 300MB per-deployment and 10MB
        // per-allocation memory overhead, so only the memory expectations change.
        Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8);
        Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 8);
        Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes());
        Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes());
        PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node1, node2), List.of(deployment1, deployment2));
        List<Node> nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations();
        assertThat(nodesPreservingAllocations, hasSize(2));
        assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1"));
        assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3));
        assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2"));
        assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(630).getBytes()));
        assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0));
        List<AssignmentPlan.Deployment> modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations();
        assertThat(modelsPreservingAllocations, hasSize(2));
        assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
        assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
        // Memory overheads are carried through unchanged on the rewritten deployments.
        assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
        assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
        assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3));
        assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4));
        assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0)));
        AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)).assignModelToNode(deployment1, node1, 2).build();
        assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 2)));
        assertThat(plan.assignments(deployment2), isEmpty());
        plan = preserveAllAllocations.mergePreservedAllocations(plan);
        assertThat(plan.assignments(deployment1), isPresentWith(Map.of(node1, 3)));
        assertThat(plan.assignments(deployment2), isPresentWith(Map.of(node1, 1, node2, 2)));
        assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(280).getBytes()));
        assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1));
        assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(630).getBytes()));
        assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0));
    }
}
294965.81114elasticsearch
/**
 * Tests {@code PreserveOneAllocation}: unlike {@code PreserveAllAllocations}, only ONE
 * allocation per deployment per node is preserved, so a node that previously hosted two
 * allocations keeps the cores of the second one available (n_2: 8 - 1x4 = 4 cores) and
 * the deployment retains the un-preserved allocation in {@code currentAllocationsByNodeId}
 * (m_2 shows "n_2": 1). Merging a computed plan then adds the preserved allocations back.
 * The first scenario uses zero memory overheads; the second repeats it with 300MB
 * per-deployment and 10MB per-allocation overheads, changing only memory expectations.
 */
public void testGivenPreviousAssignments() {
    {
        // Scenario 1: no per-deployment or per-allocation memory overhead.
        Node node1 = new Node("n_1", ByteSizeValue.ofMb(640).getBytes(), 8);
        Node node2 = new Node("n_2", ByteSizeValue.ofMb(640).getBytes(), 8);
        // m_1: 1 allocation on n_1; m_2: 1 allocation on n_1 and 2 on n_2, 4 threads each.
        Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, 0, 0);
        Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0);
        PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(deployment1, deployment2));
        // Only one allocation per deployment is held back per node:
        // n_1 loses 1x1 + 1x4 = 5 cores (8 -> 3); n_2 loses just 1x4 (8 -> 4).
        List<Node> nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations();
        assertThat(nodesPreservingAllocations, hasSize(2));
        assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1"));
        assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(0L));
        assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3));
        assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2"));
        assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4));
        // m_1: 2-1=1 remaining; m_2: 6-2=4 remaining, and n_2 still shows the one
        // allocation that was NOT preserved.
        List<AssignmentPlan.Deployment> modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations();
        assertThat(modelsPreservingAllocations, hasSize(2));
        assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
        assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
        assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
        assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4));
        assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4));
        assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1)));
        // New plan: 2 fresh m_1 allocations on n_1, 1 fresh m_2 allocation on n_2...
        AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)).assignModelToNode(deployment1, node1, 2).assignModelToNode(deployment2, node2, 1).build();
        assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2)));
        assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1)));
        // ...merging adds back the preserved ones: m_1 -> n_1:3, m_2 -> n_1:1 and n_2:1+1=2.
        plan = preserveOneAllocation.mergePreservedAllocations(plan);
        assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3)));
        assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2)));
        assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(0L));
        assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1));
        assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0));
    }
    {
        // Scenario 2: same layout with 300MB per-deployment and 10MB per-allocation
        // memory overheads; only the memory expectations differ from scenario 1.
        Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8);
        Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 8);
        Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes());
        Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes());
        PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(deployment1, deployment2));
        List<Node> nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations();
        assertThat(nodesPreservingAllocations, hasSize(2));
        assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1"));
        assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3));
        assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2"));
        assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(630).getBytes()));
        assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4));
        List<AssignmentPlan.Deployment> modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations();
        assertThat(modelsPreservingAllocations, hasSize(2));
        assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1"));
        assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
        assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1));
        assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0)));
        assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2"));
        assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes()));
        assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4));
        assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4));
        assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1)));
        AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)).assignModelToNode(deployment1, node1, 2).assignModelToNode(deployment2, node2, 1).build();
        assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2)));
        assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1)));
        plan = preserveOneAllocation.mergePreservedAllocations(plan);
        assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3)));
        assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2)));
        assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(280).getBytes()));
        assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1));
        assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(630).getBytes()));
        assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0));
    }
}
293141.586123elasticsearch
/**
 * Verifies that shards of a searchable snapshot index whose data matches the query range
 * are never marked as skipped by the coordinating node's can-match phase — neither while
 * the snapshot restore is blocked, nor after the only data node holding the index stops.
 * <p>
 * Searches themselves are expected to fail (no searchable copy is available), but the
 * shard-level search_shards responses, when obtainable, must report zero skipped groups.
 *
 * @throws Exception on cluster setup failures
 */
public void testSearchableSnapshotShardsThatHaveMatchingDataAreNotSkippedOnTheCoordinatingNode() throws Exception {
    internalCluster().startMasterOnlyNode();
    internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
    final String dataNodeHoldingRegularIndex = internalCluster().startDataOnlyNode();
    final String dataNodeHoldingSearchableSnapshot = internalCluster().startDataOnlyNode();
    final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNodeHoldingSearchableSnapshot);
    // Create an index with timestamps inside the queried range, pinned to the regular data node.
    final String indexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    final int indexWithinSearchRangeShardCount = randomIntBetween(1, 3);
    createIndexWithTimestamp(indexWithinSearchRange, indexWithinSearchRangeShardCount, Settings.builder().put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingRegularIndex).build());
    indexDocumentsWithTimestampWithinDate(indexWithinSearchRange, between(1, 1000), TIMESTAMP_TEMPLATE_WITHIN_RANGE);
    // Snapshot the index, delete it, then mount it as a searchable snapshot on a node
    // whose repository access is blocked so recovery cannot complete yet.
    final String repositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    createRepository(repositoryName, "mock");
    final SnapshotId snapshotId = createSnapshot(repositoryName, "snapshot-1", List.of(indexWithinSearchRange)).snapshotId();
    assertAcked(indicesAdmin().prepareDelete(indexWithinSearchRange));
    final String searchableSnapshotIndexWithinSearchRange = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    blockDataNode(repositoryName, dataNodeHoldingSearchableSnapshot);
    Settings restoredIndexSettings = Settings.builder().put(INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), dataNodeHoldingSearchableSnapshot).build();
    final MountSearchableSnapshotRequest mountRequest = new MountSearchableSnapshotRequest(searchableSnapshotIndexWithinSearchRange, repositoryName, snapshotId.getName(), indexWithinSearchRange, restoredIndexSettings, Strings.EMPTY_ARRAY, false, randomFrom(MountSearchableSnapshotRequest.Storage.values()));
    client().execute(MountSearchableSnapshotAction.INSTANCE, mountRequest).actionGet();
    // While recovery is blocked no shard has started, so the timestamp range is unknown
    // and no timestamp field type is registered yet.
    final IndexMetadata indexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange);
    assertThat(indexMetadata.getTimestampRange(), equalTo(IndexLongFieldRange.NO_SHARDS));
    DateFieldMapper.DateFieldType timestampFieldType = indicesService.getTimestampFieldType(indexMetadata.getIndex());
    assertThat(timestampFieldType, nullValue());
    RangeQueryBuilder rangeQuery = QueryBuilders.rangeQuery(DataStream.TIMESTAMP_FIELD_NAME).from("2020-11-28T00:00:00.000000000Z", true).to("2020-11-29T00:00:00.000000000Z");
    SearchRequest request = new SearchRequest().indices(searchableSnapshotIndexWithinSearchRange).source(new SearchSourceBuilder().query(rangeQuery));
    // Searching must fail while no searchable copy exists; the debug logging runs only
    // if the search unexpectedly succeeds, right before fail() trips the test.
    expectThrows(SearchPhaseExecutionException.class, () -> {
        SearchResponse response = client().search(request).actionGet();
        logger.info("[TEST DEBUG INFO] Search hits: {} Successful shards: {}, failed shards: {}, skipped shards: {}, total shards: {}", response.getHits().getTotalHits().value, response.getSuccessfulShards(), response.getFailedShards(), response.getSkippedShards(), response.getTotalShards());
        fail("This search call is expected to throw an exception but it did not");
    });
    boolean allowPartialSearchResults = false;
    SearchShardsRequest searchShardsRequest = new SearchShardsRequest(new String[] { searchableSnapshotIndexWithinSearchRange }, SearchRequest.DEFAULT_INDICES_OPTIONS, rangeQuery, null, null, allowPartialSearchResults, null);
    {
        SearchShardsResponse searchShardsResponse = null;
        try {
            searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
        } catch (SearchPhaseExecutionException ignored) {
            // Expected while the shards are unassigned: a search_shards failure is fine;
            // the assertions below only apply when a response was obtained.
        }
        if (searchShardsResponse != null) {
            for (SearchShardsGroup group : searchShardsResponse.getGroups()) {
                assertFalse("no shard should be marked as skipped", group.skipped());
            }
        }
    }
    // Let the restore finish and wait for the index to become searchable.
    unblockNode(repositoryName, dataNodeHoldingSearchableSnapshot);
    waitUntilRecoveryIsDone(searchableSnapshotIndexWithinSearchRange);
    ensureGreen(searchableSnapshotIndexWithinSearchRange);
    // Once recovered, the index must expose a complete timestamp range bounded by the
    // indexed documents' dates.
    final IndexMetadata updatedIndexMetadata = getIndexMetadata(searchableSnapshotIndexWithinSearchRange);
    final IndexLongFieldRange updatedTimestampMillisRange = updatedIndexMetadata.getTimestampRange();
    final DateFieldMapper.DateFieldType dateFieldType = indicesService.getTimestampFieldType(updatedIndexMetadata.getIndex());
    assertThat(dateFieldType, notNullValue());
    final DateFieldMapper.Resolution resolution = dateFieldType.resolution();
    assertThat(updatedTimestampMillisRange.isComplete(), equalTo(true));
    assertThat(updatedTimestampMillisRange, not(sameInstance(IndexLongFieldRange.EMPTY)));
    assertThat(updatedTimestampMillisRange.getMin(), greaterThanOrEqualTo(resolution.convert(Instant.parse("2020-11-28T00:00:00Z"))));
    assertThat(updatedTimestampMillisRange.getMax(), lessThanOrEqualTo(resolution.convert(Instant.parse("2020-11-29T00:00:00Z"))));
    // Stop the only node holding the searchable snapshot: searches fail again, but even
    // unassigned shards with matching data must not be reported as skipped.
    internalCluster().stopNode(dataNodeHoldingSearchableSnapshot);
    waitUntilAllShardsAreUnassigned(updatedIndexMetadata.getIndex());
    expectThrows(SearchPhaseExecutionException.class, () -> client().search(request).actionGet());
    {
        SearchShardsResponse searchShardsResponse = null;
        try {
            searchShardsResponse = client().execute(TransportSearchShardsAction.TYPE, searchShardsRequest).actionGet();
        } catch (SearchPhaseExecutionException ignored) {
            // Expected when all shards are unassigned; only assert on an actual response.
        }
        if (searchShardsResponse != null) {
            assertThat(searchShardsResponse.getGroups().size(), equalTo(indexWithinSearchRangeShardCount));
            // Partition the shard groups by their skipped flag: none may be skipped.
            List<SearchShardsGroup> skipped = searchShardsResponse.getGroups().stream().filter(SearchShardsGroup::skipped).toList();
            List<SearchShardsGroup> notSkipped = searchShardsResponse.getGroups().stream().filter(g -> g.skipped() == false).toList();
            assertThat(skipped.size(), equalTo(0));
            assertThat(notSkipped.size(), equalTo(indexWithinSearchRangeShardCount));
        }
    }
}
293873.7110100elasticsearch
/**
 * Tests {@code RemoteClusterClientBootstrapOptions.fromSettings}: the result must be empty
 * whenever the remote-cluster-specific TCP settings are absent or identical to the regular
 * transport TCP settings, and must contain exactly those options whose remote-cluster
 * values differ from the transport values.
 */
public void testBuildRemoteClusterClientBootStrapOptions() {
    // Case 1: no settings at all -> empty options.
    final Settings settings1 = Settings.builder().build();
    final var options1 = RemoteClusterClientBootstrapOptions.fromSettings(settings1);
    assertThat(options1.isEmpty(), is(true));
    // Case 2: only generic transport TCP settings, no remote-cluster overrides -> still empty.
    final Settings settings2 = Settings.builder().put(TransportSettings.TCP_NO_DELAY.getKey(), randomBoolean()).put(TransportSettings.TCP_KEEP_ALIVE.getKey(), randomBoolean()).put(TransportSettings.TCP_KEEP_IDLE.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_KEEP_INTERVAL.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_KEEP_COUNT.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_SEND_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(randomIntBetween(-1, 1000))).put(TransportSettings.TCP_RECEIVE_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(randomIntBetween(-1, 1000))).put(TransportSettings.TCP_REUSE_ADDRESS.getKey(), randomBoolean()).build();
    final var options2 = RemoteClusterClientBootstrapOptions.fromSettings(settings2);
    assertThat(options2.isEmpty(), is(true));
    // Case 3: remote-cluster settings that differ from the (optionally set) transport
    // settings -> only the differing options are populated; keep-alive probes stay null
    // because the remote keep-idle/interval/count keys are not set here.
    final Settings.Builder builder3 = Settings.builder();
    if (randomBoolean()) {
        builder3.put(TransportSettings.TCP_NO_DELAY.getKey(), true).put(TransportSettings.TCP_KEEP_ALIVE.getKey(), true).put(TransportSettings.TCP_KEEP_IDLE.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_KEEP_INTERVAL.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_KEEP_COUNT.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_SEND_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(-1)).put(TransportSettings.TCP_RECEIVE_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(-1));
    }
    final Settings settings3 = builder3.put(RemoteClusterPortSettings.TCP_NO_DELAY.getKey(), false).put(RemoteClusterPortSettings.TCP_KEEP_ALIVE.getKey(), false).put(RemoteClusterPortSettings.TCP_SEND_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(42)).put(RemoteClusterPortSettings.TCP_RECEIVE_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(99)).put(RemoteClusterPortSettings.TCP_REUSE_ADDRESS.getKey(), false == TransportSettings.TCP_REUSE_ADDRESS.get(Settings.EMPTY)).build();
    final var options3 = RemoteClusterClientBootstrapOptions.fromSettings(settings3);
    assertThat(options3.isEmpty(), is(false));
    assertThat(options3.tcpNoDelay(), is(false));
    assertThat(options3.tcpKeepAlive(), is(false));
    assertThat(options3.tcpKeepIdle(), nullValue());
    assertThat(options3.tcpKeepInterval(), nullValue());
    assertThat(options3.tcpKeepCount(), nullValue());
    assertThat(options3.tcpSendBufferSize(), equalTo(ByteSizeValue.ofBytes(42)));
    assertThat(options3.tcpReceiveBufferSize(), equalTo(ByteSizeValue.ofBytes(99)));
    assertThat(options3.tcpReuseAddress(), notNullValue());
    // Case 4: keep-alive probe settings — each of idle/interval/count appears in the
    // options only when its remote-cluster value differs from the transport value.
    final Settings.Builder builder4 = Settings.builder();
    if (randomBoolean()) {
        builder4.put(TransportSettings.TCP_NO_DELAY.getKey(), true).put(TransportSettings.TCP_KEEP_ALIVE.getKey(), true).put(TransportSettings.TCP_KEEP_IDLE.getKey(), 299).put(TransportSettings.TCP_KEEP_INTERVAL.getKey(), 299).put(TransportSettings.TCP_KEEP_COUNT.getKey(), 299).put(TransportSettings.TCP_SEND_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(-1)).put(TransportSettings.TCP_RECEIVE_BUFFER_SIZE.getKey(), ByteSizeValue.ofBytes(-1));
    }
    if (randomBoolean()) {
        builder4.put(RemoteClusterPortSettings.TCP_KEEP_ALIVE.getKey(), true);
    }
    final boolean differentKeepIdle = randomBoolean();
    if (differentKeepIdle) {
        builder4.put(RemoteClusterPortSettings.TCP_KEEP_IDLE.getKey(), 42);
    }
    // Exactly one of keep-interval / keep-count is overridden, so the options are never empty.
    final boolean differentKeepInterval = randomBoolean();
    final boolean differentKeepCount = false == differentKeepInterval;
    if (differentKeepInterval) {
        builder4.put(RemoteClusterPortSettings.TCP_KEEP_INTERVAL.getKey(), 43);
    }
    if (differentKeepCount) {
        builder4.put(RemoteClusterPortSettings.TCP_KEEP_COUNT.getKey(), 44);
    }
    final Settings settings4 = builder4.build();
    final var options4 = RemoteClusterClientBootstrapOptions.fromSettings(settings4);
    assertThat(options4.isEmpty(), is(false));
    assertThat(options4.tcpKeepAlive(), is(true));
    assertThat(options4.tcpKeepIdle(), differentKeepIdle ? equalTo(42) : nullValue());
    assertThat(options4.tcpKeepInterval(), differentKeepInterval ? equalTo(43) : nullValue());
    assertThat(options4.tcpKeepCount(), differentKeepCount ? equalTo(44) : nullValue());
    // Case 5: remote-cluster settings mirroring the transport settings exactly -> empty.
    final Settings settings5 = Settings.builder().put(settings2).put(RemoteClusterPortSettings.TCP_NO_DELAY.getKey(), TransportSettings.TCP_NO_DELAY.get(settings2)).put(RemoteClusterPortSettings.TCP_KEEP_ALIVE.getKey(), TransportSettings.TCP_KEEP_ALIVE.get(settings2)).put(RemoteClusterPortSettings.TCP_KEEP_IDLE.getKey(), TransportSettings.TCP_KEEP_IDLE.get(settings2)).put(RemoteClusterPortSettings.TCP_KEEP_INTERVAL.getKey(), TransportSettings.TCP_KEEP_INTERVAL.get(settings2)).put(RemoteClusterPortSettings.TCP_KEEP_COUNT.getKey(), TransportSettings.TCP_KEEP_COUNT.get(settings2)).put(RemoteClusterPortSettings.TCP_SEND_BUFFER_SIZE.getKey(), TransportSettings.TCP_SEND_BUFFER_SIZE.get(settings2)).put(RemoteClusterPortSettings.TCP_RECEIVE_BUFFER_SIZE.getKey(), TransportSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings2)).put(RemoteClusterPortSettings.TCP_REUSE_ADDRESS.getKey(), TransportSettings.TCP_REUSE_ADDRESS.get(settings2)).build();
    final var options5 = RemoteClusterClientBootstrapOptions.fromSettings(settings5);
    assertThat(options5.isEmpty(), is(true));
    // Case 6: keep-alive disabled on both sides -> the probe settings are irrelevant -> empty.
    final Settings settings6 = Settings.builder().put(TransportSettings.TCP_KEEP_ALIVE.getKey(), false).put(TransportSettings.TCP_KEEP_IDLE.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_KEEP_INTERVAL.getKey(), randomIntBetween(-1, 300)).put(TransportSettings.TCP_KEEP_COUNT.getKey(), randomIntBetween(-1, 300)).put(RemoteClusterPortSettings.TCP_KEEP_ALIVE.getKey(), false).put(RemoteClusterPortSettings.TCP_KEEP_IDLE.getKey(), randomIntBetween(-1, 300)).put(RemoteClusterPortSettings.TCP_KEEP_INTERVAL.getKey(), randomIntBetween(-1, 300)).put(RemoteClusterPortSettings.TCP_KEEP_COUNT.getKey(), randomIntBetween(-1, 300)).build();
    final var options6 = RemoteClusterClientBootstrapOptions.fromSettings(settings6);
    assertThat(options6.isEmpty(), is(true));
}
293958.361116elasticsearch
/**
 * Builds the test fixture for the OIDC logout action: an OIDC realm configuration, a mocked
 * {@code Client} whose prepare*/index/bulk calls are stubbed to record requests into
 * {@code indexRequests}/{@code bulkRequests} and answer immediately, a mocked security index
 * that runs callbacks inline, a real {@code TokenService} wired to those mocks, and finally
 * the {@code TransportOpenIdConnectLogoutAction} under test backed by an
 * {@code OpenIdConnectRealm}.
 *
 * @throws Exception if realm or environment construction fails
 */
public void setup() throws Exception {
    // Realm and SSL settings for the OIDC realm under test.
    final RealmConfig.RealmIdentifier realmIdentifier = new RealmConfig.RealmIdentifier("oidc", REALM_NAME);
    final Settings settings = getBasicRealmSettings().put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).put("path.home", createTempDir()).put(RealmSettings.getFullSettingKey(realmIdentifier, RealmSettings.ORDER_SETTING), 0).build();
    final Settings sslSettings = Settings.builder().put("xpack.security.authc.realms.oidc.oidc-realm.ssl.verification_mode", "certificate").put("path.home", createTempDir()).build();
    final ThreadContext threadContext = new ThreadContext(settings);
    final ThreadPool threadPool = mock(ThreadPool.class);
    when(threadPool.getThreadContext()).thenReturn(threadContext);
    // Put an authenticated user into the thread context so security-aware code can run.
    AuthenticationTestHelper.builder().user(new User("kibana")).realmRef(new Authentication.RealmRef("realm", "type", "node")).build(false).writeToContext(threadContext);
    // Captured requests, inspected by the tests after the action runs.
    indexRequests = new ArrayList<>();
    bulkRequests = new ArrayList<>();
    client = mock(Client.class);
    when(client.threadPool()).thenReturn(threadPool);
    when(client.settings()).thenReturn(settings);
    // prepareGet/prepareIndex/prepareUpdate/prepareBulk return real builders bound to the mock client.
    doAnswer(invocationOnMock -> {
        GetRequestBuilder builder = new GetRequestBuilder(client);
        builder.setIndex((String) invocationOnMock.getArguments()[0]).setId((String) invocationOnMock.getArguments()[1]);
        return builder;
    }).when(client).prepareGet(nullable(String.class), nullable(String.class));
    doAnswer(invocationOnMock -> {
        IndexRequestBuilder builder = new IndexRequestBuilder(client);
        builder.setIndex((String) invocationOnMock.getArguments()[0]);
        return builder;
    }).when(client).prepareIndex(nullable(String.class));
    doAnswer(invocationOnMock -> {
        UpdateRequestBuilder builder = new UpdateRequestBuilder(client);
        builder.setIndex((String) invocationOnMock.getArguments()[0]).setId((String) invocationOnMock.getArguments()[1]);
        return builder;
    }).when(client).prepareUpdate(nullable(String.class), anyString());
    doAnswer(invocationOnMock -> {
        BulkRequestBuilder builder = new BulkRequestBuilder(client);
        return builder;
    }).when(client).prepareBulk();
    // index()/execute(index)/bulk() record the request and answer with a canned success response.
    doAnswer(invocationOnMock -> {
        IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[0];
        @SuppressWarnings("unchecked")
        ActionListener<IndexResponse> listener = (ActionListener<IndexResponse>) invocationOnMock.getArguments()[1];
        indexRequests.add(indexRequest);
        final IndexResponse response = new IndexResponse(indexRequest.shardId(), indexRequest.id(), 1, 1, 1, true);
        listener.onResponse(response);
        return Void.TYPE;
    }).when(client).index(any(IndexRequest.class), anyActionListener());
    doAnswer(invocationOnMock -> {
        IndexRequest indexRequest = (IndexRequest) invocationOnMock.getArguments()[1];
        @SuppressWarnings("unchecked")
        ActionListener<IndexResponse> listener = (ActionListener<IndexResponse>) invocationOnMock.getArguments()[2];
        indexRequests.add(indexRequest);
        final IndexResponse response = new IndexResponse(new ShardId("test", "test", 0), indexRequest.id(), 1, 1, 1, true);
        listener.onResponse(response);
        return Void.TYPE;
    }).when(client).execute(eq(TransportIndexAction.TYPE), any(IndexRequest.class), anyActionListener());
    doAnswer(invocationOnMock -> {
        BulkRequest bulkRequest = (BulkRequest) invocationOnMock.getArguments()[0];
        @SuppressWarnings("unchecked")
        ActionListener<BulkResponse> listener = (ActionListener<BulkResponse>) invocationOnMock.getArguments()[1];
        bulkRequests.add(bulkRequest);
        final BulkResponse response = new BulkResponse(new BulkItemResponse[0], 1);
        listener.onResponse(response);
        return Void.TYPE;
    }).when(client).bulk(any(BulkRequest.class), anyActionListener());
    // Security index mock: run the "then execute" callbacks synchronously and report availability.
    final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class);
    doAnswer(inv -> {
        ((Runnable) inv.getArguments()[1]).run();
        return null;
    }).when(securityIndex).prepareIndexIfNeededThenExecute(anyConsumer(), any(Runnable.class));
    doAnswer(inv -> {
        ((Runnable) inv.getArguments()[1]).run();
        return null;
    }).when(securityIndex).checkIndexVersionThenExecute(anyConsumer(), any(Runnable.class));
    when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true);
    when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true);
    when(securityIndex.defensiveCopy()).thenReturn(securityIndex);
    // Real TokenService wired to the mocks; licensing allows the token service feature.
    final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
    final MockLicenseState licenseState = mock(MockLicenseState.class);
    when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true);
    tokenService = new TokenService(settings, Clock.systemUTC(), client, licenseState, new SecurityContext(settings, threadContext), securityIndex, securityIndex, clusterService);
    // The action under test, backed by a real OpenIdConnectRealm registered in the mocked Realms.
    final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
    final Realms realms = mock(Realms.class);
    action = new TransportOpenIdConnectLogoutAction(transportService, mock(ActionFilters.class), realms, tokenService);
    final Environment env = TestEnvironment.newEnvironment(settings);
    final RealmConfig realmConfig = new RealmConfig(realmIdentifier, settings, env, threadContext);
    oidcRealm = new OpenIdConnectRealm(realmConfig, new SSLService(TestEnvironment.newEnvironment(sslSettings)), mock(UserRoleMapper.class), mock(ResourceWatcherService.class));
    when(realms.realm(realmConfig.name())).thenReturn(oidcRealm);
}
295199.461106elasticsearch
/**
 * Tests {@code Role#authorize} for combinations of field-level security (FLS)
 * and document-level security (DLS), granted either directly on a concrete
 * index or through an alias, including the union of DLS queries when two
 * groups match via the same alias.
 */
public void testAuthorize() {
    IndexMetadata.Builder imbBuilder = IndexMetadata.builder("_index").settings(indexSettings(IndexVersion.current(), 1, 1)).putAlias(AliasMetadata.builder("_alias"));
    Metadata md = Metadata.builder().put(imbBuilder).build();
    FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY);
    SortedMap<String, IndexAbstraction> lookup = md.getIndicesLookup();
    Set<BytesReference> query = Collections.singleton(new BytesArray("{}"));
    String[] fields = new String[] { "_field" };
    // FLS on "_field" plus a DLS query, granted directly on the index
    Role role = Role.builder(RESTRICTED_INDICES, "_role").add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_index").build();
    IndicesAccessControl permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
    assertFieldLevelSecurity(permissions, "_index");
    assertDocumentLevelSecurity(permissions, "_index", query);
    // FLS only: no DLS query at all
    role = Role.builder(RESTRICTED_INDICES, "_role").add(new FieldPermissions(fieldPermissionDef(fields, null)), null, IndexPrivilege.ALL, randomBoolean(), "_index").build();
    permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
    assertFieldLevelSecurity(permissions, "_index");
    assertThat(permissions.getIndexPermissions("_index").getDocumentPermissions().hasDocumentLevelPermissions(), is(false));
    assertThat(permissions.getIndexPermissions("_index").getDocumentPermissions().getListOfQueries(), nullValue());
    // DLS only: default (unrestricted) field permissions with a query
    role = Role.builder(RESTRICTED_INDICES, "_role").add(FieldPermissions.DEFAULT, query, IndexPrivilege.ALL, randomBoolean(), "_index").build();
    permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_index"), lookup, fieldPermissionsCache);
    assertNoFieldLevelSecurity(permissions, "_index");
    assertDocumentLevelSecurity(permissions, "_index", query);
    // FLS + DLS granted through the alias apply to the alias and its backing index
    role = Role.builder(RESTRICTED_INDICES, "_role").add(new FieldPermissions(fieldPermissionDef(fields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias").build();
    permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
    assertFieldLevelSecurity(permissions, "_index");
    assertDocumentLevelSecurity(permissions, "_index", query);
    assertFieldLevelSecurity(permissions, "_alias");
    assertDocumentLevelSecurity(permissions, "_alias", query);
    // A field grant containing "*" disables field-level security entirely
    String[] allFields = randomFrom(new String[] { "*" }, new String[] { "foo", "*" }, new String[] { randomAlphaOfLengthBetween(1, 10), "*" });
    role = Role.builder(RESTRICTED_INDICES, "_role").add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias").build();
    permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
    assertNoFieldLevelSecurity(permissions, "_index");
    assertDocumentLevelSecurity(permissions, "_index", query);
    assertNoFieldLevelSecurity(permissions, "_alias");
    assertDocumentLevelSecurity(permissions, "_alias", query);
    // Two groups on the same alias: their DLS queries are unioned for the
    // alias and for every index behind it (including a newly-added one)
    IndexMetadata.Builder imbBuilder1 = IndexMetadata.builder("_index_1").settings(indexSettings(IndexVersion.current(), 1, 1)).putAlias(AliasMetadata.builder("_alias"));
    md = Metadata.builder(md).put(imbBuilder1).build();
    lookup = md.getIndicesLookup();
    Set<BytesReference> fooQuery = Collections.singleton(new BytesArray("{foo}"));
    allFields = randomFrom(new String[] { "*" }, new String[] { "foo", "*" }, new String[] { randomAlphaOfLengthBetween(1, 10), "*" });
    role = Role.builder(RESTRICTED_INDICES, "_role").add(new FieldPermissions(fieldPermissionDef(allFields, null)), fooQuery, IndexPrivilege.ALL, randomBoolean(), "_alias").add(new FieldPermissions(fieldPermissionDef(allFields, null)), query, IndexPrivilege.ALL, randomBoolean(), "_alias").build();
    permissions = role.authorize(TransportSearchAction.TYPE.name(), Sets.newHashSet("_alias"), lookup, fieldPermissionsCache);
    Set<BytesReference> bothQueries = Sets.union(fooQuery, query);
    assertNoFieldLevelSecurity(permissions, "_index");
    assertDocumentLevelSecurity(permissions, "_index", bothQueries);
    assertNoFieldLevelSecurity(permissions, "_index_1");
    assertDocumentLevelSecurity(permissions, "_index_1", bothQueries);
    assertNoFieldLevelSecurity(permissions, "_alias");
    assertDocumentLevelSecurity(permissions, "_alias", bothQueries);
}

/** Asserts the named abstraction has permissions with FLS that grants access to "_field". */
private static void assertFieldLevelSecurity(IndicesAccessControl permissions, String name) {
    assertThat(permissions.getIndexPermissions(name), notNullValue());
    assertTrue(permissions.getIndexPermissions(name).getFieldPermissions().grantsAccessTo("_field"));
    assertTrue(permissions.getIndexPermissions(name).getFieldPermissions().hasFieldLevelSecurity());
}

/** Asserts the named abstraction has permissions but no field-level security. */
private static void assertNoFieldLevelSecurity(IndicesAccessControl permissions, String name) {
    assertThat(permissions.getIndexPermissions(name), notNullValue());
    assertFalse(permissions.getIndexPermissions(name).getFieldPermissions().hasFieldLevelSecurity());
}

/** Asserts the named abstraction has DLS whose single set of queries equals {@code expectedQueries}. */
private static void assertDocumentLevelSecurity(IndicesAccessControl permissions, String name, Set<BytesReference> expectedQueries) {
    assertThat(permissions.getIndexPermissions(name).getDocumentPermissions().hasDocumentLevelPermissions(), is(true));
    assertThat(permissions.getIndexPermissions(name).getDocumentPermissions().getSingleSetOfQueries(), hasSize(expectedQueries.size()));
    assertThat(permissions.getIndexPermissions(name).getDocumentPermissions().getSingleSetOfQueries(), equalTo(expectedQueries));
}
292624.396125elasticsearch
/**
 * Tests that SLM retention removes an unsuccessful (FAILED or PARTIAL)
 * snapshot only once a newer successful snapshot exists for the same policy.
 *
 * @param partialSuccess when {@code true} a PARTIAL snapshot is produced by
 *                       stopping nodes mid-snapshot; otherwise a FAILED
 *                       snapshot is injected directly into the repository
 * @throws Exception on test-infrastructure failures
 */
private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Exception {
    final String indexName = "test-idx";
    final String policyId = "test-policy";
    final SnapshotState expectedUnsuccessfulState = partialSuccess ? SnapshotState.PARTIAL : SnapshotState.FAILED;
    createAndPopulateIndex(indexName);
    createRepositoryNoVerify(REPO, "mock");
    // Retention keeps between 1 and 2 snapshots; the cron never fires on its own.
    createSnapshotPolicy(policyId, "snap", NEVER_EXECUTE_CRON_SCHEDULE, REPO, indexName, true, partialSuccess, new SnapshotRetentionConfiguration(null, 1, 2));
    AtomicReference<String> failedSnapshotName = new AtomicReference<>();
    {
        if (partialSuccess) {
            logger.info("-->  stopping random data node, which should cause shards to go missing");
            internalCluster().stopRandomDataNode();
            assertBusy(() -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth().get().getStatus()), 30, TimeUnit.SECONDS);
            blockMasterFromFinalizingSnapshotOnIndexFile(REPO);
            logger.info("-->  start snapshot");
            ActionFuture<ExecuteSnapshotLifecycleAction.Response> snapshotFuture = client().execute(ExecuteSnapshotLifecycleAction.INSTANCE, new ExecuteSnapshotLifecycleAction.Request(policyId));
            waitForBlock(internalCluster().getMasterName(), REPO);
            logger.info("-->  stopping master node");
            internalCluster().stopCurrentMasterNode();
            logger.info("-->  wait until the snapshot is done");
            failedSnapshotName.set(snapshotFuture.get().getSnapshotName());
            assertNotNull(failedSnapshotName.get());
        } else {
            // Inject a pre-failed snapshot tagged with this policy id.
            final String snapshotName = "failed-snapshot-1";
            addBwCFailedSnapshot(REPO, snapshotName, Collections.singletonMap(SnapshotsService.POLICY_ID_METADATA_FIELD, policyId));
            failedSnapshotName.set(snapshotName);
        }
        logger.info("-->  verify that snapshot [{}] is {}", failedSnapshotName.get(), expectedUnsuccessfulState);
        assertBusy(() -> {
            try {
                GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get();
                SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
                assertEquals(expectedUnsuccessfulState, snapshotInfo.state());
            } catch (SnapshotMissingException ex) {
                // Log the snapshot name itself, not the AtomicReference wrapper.
                logger.info("failed to find snapshot {}, retrying", failedSnapshotName.get());
                throw new AssertionError(ex);
            }
        });
    }
    {
        // First retention run: the unsuccessful snapshot must NOT be deleted yet,
        // because no successful snapshot exists for the policy.
        logger.info("--> executing SLM retention");
        assertAcked(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get());
    }
    AtomicReference<String> successfulSnapshotName = new AtomicReference<>();
    {
        logger.info("--> deleting old index [{}], as it is now missing shards", indexName);
        assertAcked(indicesAdmin().prepareDelete(indexName).get());
        createAndPopulateIndex(indexName);
        logger.info("--> unblocking snapshots");
        unblockNode(REPO, internalCluster().getMasterName());
        unblockAllDataNodes(REPO);
        logger.info("--> taking new snapshot");
        ActionFuture<ExecuteSnapshotLifecycleAction.Response> snapshotResponse = client().execute(ExecuteSnapshotLifecycleAction.INSTANCE, new ExecuteSnapshotLifecycleAction.Request(policyId));
        logger.info("--> waiting for snapshot to complete");
        successfulSnapshotName.set(snapshotResponse.get().getSnapshotName());
        assertNotNull(successfulSnapshotName.get());
        logger.info("-->  verify that snapshot [{}] succeeded", successfulSnapshotName.get());
        assertBusy(() -> {
            final SnapshotInfo snapshotInfo;
            try {
                GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO).setSnapshots(successfulSnapshotName.get()).get();
                snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
            } catch (SnapshotMissingException sme) {
                throw new AssertionError(sme);
            }
            assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
        }, 30L, TimeUnit.SECONDS);
    }
    {
        // The first retention run must have left the unsuccessful snapshot alone.
        logger.info("-->  verify that snapshot [{}] still exists", failedSnapshotName.get());
        GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get();
        SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
        assertEquals(expectedUnsuccessfulState, snapshotInfo.state());
    }
    {
        // Second retention run: now that a successful snapshot exists, the
        // unsuccessful one is eligible for deletion while the success remains.
        logger.info("--> executing SLM retention");
        assertAcked(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get());
        logger.info("--> waiting for {} snapshot [{}] to be deleted", expectedUnsuccessfulState, failedSnapshotName.get());
        assertBusy(() -> {
            try {
                GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get();
                assertThat(snapshotsStatusResponse.getSnapshots(), empty());
            } catch (SnapshotMissingException e) {
                // Expected: once the snapshot is fully deleted the GET itself
                // throws instead of returning an empty list, which also counts
                // as successful deletion.
            }
            logger.info("--> {} snapshot [{}] has been deleted, checking successful snapshot [{}] still exists", expectedUnsuccessfulState, failedSnapshotName.get(), successfulSnapshotName.get());
            GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO).setSnapshots(successfulSnapshotName.get()).get();
            SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
            assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
        }, 30L, TimeUnit.SECONDS);
    }
}
295303.661113elasticsearch
/**
 * Tests the SQL DATETIME_FORMAT function (java.time pattern semantics) and the
 * FORMAT function (Microsoft-style format specifiers) across datetime and time
 * values, patterns, and time zones.
 */
public void testFormatting() {
    ZoneId zoneId = ZoneId.of("Etc/GMT-10");
    Literal dateTime = l(dateTime(2019, 9, 3, 18, 10, 37, 123456789));
    // DATETIME_FORMAT: java.time DateTimeFormatter patterns
    assertEquals("AD : 3", dateTimeFormat(dateTime, "G : Q", zoneId));
    assertEquals("2019-09-04", dateTimeFormat(dateTime, "YYYY-MM-dd", zoneId));
    assertEquals("04:10:37.123456", dateTimeFormat(dateTime, "HH:mm:ss.SSSSSS", zoneId));
    assertEquals("2019-09-04 04:10:37.12345678", dateTimeFormat(dateTime, "YYYY-MM-dd HH:mm:ss.SSSSSSSS", zoneId));
    assertEquals("+1000", dateTimeFormat(dateTime, "Z", zoneId));
    assertEquals("Etc/GMT-10", dateTimeFormat(dateTime, "z", zoneId));
    assertEquals("Etc/GMT-10", dateTimeFormat(dateTime, "VV", zoneId));
    zoneId = ZoneId.of("America/Sao_Paulo");
    assertEquals("-0300", dateTimeFormat(dateTime, "Z", zoneId));
    assertEquals("BRT", dateTimeFormat(dateTime, "z", zoneId));
    assertEquals("America/Sao_Paulo", dateTimeFormat(dateTime, "VV", zoneId));
    assertEquals("07:11:22.1234", dateTimeFormat(l(time(10, 11, 22, 123456789), TIME), "HH:mm:ss.SSSS", zoneId));
    zoneId = ZoneId.of("Etc/GMT-10");
    dateTime = l(dateTime(2019, 9, 3, 18, 10, 37, 123456789));
    // FORMAT: Microsoft-style specifiers (note e.g. "G : Q" is literal output here)
    assertEquals("G : Q", msFormat(dateTime, "G : Q", zoneId));
    assertEquals("AD", msFormat(dateTime, "g", zoneId));
    assertEquals("2019-09-04", msFormat(dateTime, "yyyy-MM-dd", zoneId));
    assertEquals("YYYY-09-04", msFormat(dateTime, "YYYY-MM-dd", zoneId));
    assertEquals("2019-09-04 Wed", msFormat(dateTime, "yyyy-MM-dd ddd", zoneId));
    assertEquals("2019-09-04 Wednesday", msFormat(dateTime, "yyyy-MM-dd dddd", zoneId));
    assertEquals("04:10:37.123456", msFormat(dateTime, "HH:mm:ss.ffffff", zoneId));
    assertEquals("2019-09-04 04:10:37.12345678", msFormat(dateTime, "yyyy-MM-dd HH:mm:ss.ffffffff", zoneId));
    assertEquals("2019-09-04 04:10:37.12345678 AM", msFormat(dateTime, "yyyy-MM-dd HH:mm:ss.ffffffff tt", zoneId));
    assertEquals("2019-09-04 04:10:37.12345678 AM", msFormat(dateTime, "yyyy-MM-dd HH:mm:ss.ffffffff t", zoneId));
    assertEquals("Z", msFormat(dateTime, "Z", zoneId));
    assertEquals("+10", msFormat(dateTime, "z", zoneId));
    assertEquals("Etc/GMT-10", msFormat(dateTime, "K", zoneId));
    assertEquals("1", msFormat(dateTime, "F", zoneId));
    assertEquals("12", msFormat(dateTime, "FF", zoneId));
    zoneId = ZoneId.of("America/Sao_Paulo");
    assertEquals("Z", msFormat(dateTime, "Z", zoneId));
    assertEquals("-03", msFormat(dateTime, "z", zoneId));
    assertEquals("VV", msFormat(dateTime, "VV", zoneId));
    assertEquals("07:11:22.1234", msFormat(l(time(10, 11, 22, 123456789), TIME), "HH:mm:ss.ffff", zoneId));
    assertEquals("10:11", msFormat(l(time(10, 11, 22, 123456789), TIME), "H:m", ZoneOffset.UTC));
    assertEquals("21:9", msFormat(l(time(21, 11, 22, 123456789), TIME), "H:h", ZoneOffset.UTC));
    assertEquals("2-02", msFormat(l(time(21, 11, 2, 123456789), TIME), "s-ss", ZoneOffset.UTC));
    assertEquals("9-09-Sep-September", msFormat(dateTime, "M-MM-MMM-MMMM", zoneId));
    assertEquals("arr: 3:10 PM", msFormat(dateTime, "'arr:' h:m t", zoneId));
    assertEquals("-03/-0300/-03:00", msFormat(dateTime, "z/zz/zzz", zoneId));
    assertEquals("3", msFormat(dateTime, "d", zoneId));
    assertEquals("2001-01-2001-02001", msFormat(l(dateTime(2001, 9, 3, 18, 10, 37, 123456789)), "y-yy-yyyy-yyyyy", zoneId));
    assertEquals("%9-\"09-\\Sep-September", msFormat(dateTime, "%M-\\\"MM-\\\\MMM-MMMM", zoneId));
    assertEquals("45-0045", msFormat(l(dateTime(45, 9, 3, 18, 10, 37, 123456789)), "y-yyyy", zoneId));
}

/** Evaluates the DATETIME_FORMAT pipe for the given timestamp literal, pattern, and zone. */
private Object dateTimeFormat(Literal timestamp, String pattern, ZoneId zoneId) {
    return new DateTimeFormat(Source.EMPTY, timestamp, l(pattern), zoneId).makePipe().asProcessor().process(null);
}

/** Evaluates the FORMAT pipe for the given timestamp literal, pattern, and zone. */
private Object msFormat(Literal timestamp, String pattern, ZoneId zoneId) {
    return new Format(Source.EMPTY, timestamp, l(pattern), zoneId).makePipe().asProcessor().process(null);
}
292988.2714112elasticsearch
/**
 * Builds a randomized, shuffled array of {@link ShardStats} for three fixed
 * indices ("index-1".."index-3"), recording in {@code expectedCheckpoints} the
 * per-shard global checkpoints of every index named in {@code userIndices}.
 * Shards without sequence-number stats are recorded as -1. When
 * {@code inconsistentGlobalCheckpoints} is set, one replica per shard gets a
 * global checkpoint lagging behind the primary's.
 */
private static ShardStats[] createRandomShardStats(Map<String, long[]> expectedCheckpoints, Set<String> userIndices, boolean skipPrimaries, boolean inconsistentGlobalCheckpoints, boolean missingSeqNoStats) {
    final List<Index> testIndices = new ArrayList<>();
    testIndices.add(new Index("index-1", UUIDs.randomBase64UUID(random())));
    testIndices.add(new Index("index-2", UUIDs.randomBase64UUID(random())));
    testIndices.add(new Index("index-3", UUIDs.randomBase64UUID(random())));
    // At most one user index is picked to go without sequence-number stats.
    final String indexWithoutSeqNoStats = randomFrom(userIndices);
    final List<ShardStats> collectedStats = new ArrayList<>();
    for (final Index currentIndex : testIndices) {
        final int shardCount = randomIntBetween(1, 5);
        final List<Long> recordedCheckpoints = new ArrayList<>();
        for (int shardNo = 0; shardNo < shardCount; shardNo++) {
            final int copies = randomIntBetween(2, 4);
            // With skipPrimaries the primary lands on a random copy instead of copy 0.
            final int primaryCopy = skipPrimaries ? randomInt(copies - 1) : 0;
            int laggingReplica = -1;
            if (inconsistentGlobalCheckpoints) {
                final List<Integer> replicaCopies = new ArrayList<>(copies - 1);
                for (int copy = 0; copy < copies; copy++) {
                    if (copy != primaryCopy) {
                        replicaCopies.add(copy);
                    }
                }
                laggingReplica = randomFrom(replicaCopies);
            }
            final long localCheckpoint = randomLongBetween(100L, 100000000L);
            final long globalCheckpoint = randomBoolean() ? localCheckpoint : randomLongBetween(100L, 100000000L);
            final long maxSeqNo = Math.max(localCheckpoint, globalCheckpoint);
            SeqNoStats consistentSeqNoStats = null;
            if (missingSeqNoStats && currentIndex.getName().equals(indexWithoutSeqNoStats)) {
                recordedCheckpoints.add(-1L);
            } else {
                consistentSeqNoStats = new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint);
                recordedCheckpoints.add(globalCheckpoint);
            }
            for (int copy = 0; copy < copies; copy++) {
                final ShardId shardId = new ShardId(currentIndex, shardNo);
                final boolean isPrimary = copy == primaryCopy;
                final Path shardDir = createTempDir().resolve("indices").resolve(currentIndex.getUUID()).resolve(String.valueOf(shardNo));
                ShardRouting routing = ShardRouting.newUnassigned(shardId, isPrimary, isPrimary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null), ShardRouting.Role.DEFAULT);
                routing = routing.initialize("node-0", null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
                routing = routing.moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
                final CommonStats commonStats = new CommonStats();
                commonStats.fieldData = new FieldDataStats();
                commonStats.queryCache = new QueryCacheStats();
                commonStats.docs = new DocsStats();
                commonStats.store = new StoreStats();
                commonStats.indexing = new IndexingStats();
                commonStats.search = new SearchStats();
                commonStats.segments = new SegmentsStats();
                commonStats.merge = new MergeStats();
                commonStats.refresh = new RefreshStats();
                commonStats.completion = new CompletionStats();
                commonStats.requestCache = new RequestCacheStats();
                commonStats.get = new GetStats();
                commonStats.flush = new FlushStats();
                commonStats.warmer = new WarmerStats();
                // The lagging replica (if any) reports a global checkpoint behind the primary's.
                final SeqNoStats seqNoStatsForCopy;
                if (copy == laggingReplica) {
                    seqNoStatsForCopy = new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint - randomLongBetween(10L, 100L));
                } else {
                    seqNoStatsForCopy = consistentSeqNoStats;
                }
                collectedStats.add(new ShardStats(routing, new ShardPath(false, shardDir, shardDir, shardId), commonStats, null, seqNoStatsForCopy, null, false, 0));
            }
        }
        if (userIndices.contains(currentIndex.getName())) {
            expectedCheckpoints.put(currentIndex.getName(), recordedCheckpoints.stream().mapToLong(Long::longValue).toArray());
        }
    }
    Collections.shuffle(collectedStats, random());
    return collectedStats.toArray(new ShardStats[0]);
}
294031.54111elasticsearch
/**
 * Happy-path test for {@code TriggeredWatchStore#findTriggeredWatches}: a
 * started, fully-assigned triggered-watches index is described in a cluster
 * state, the refresh/search/scroll/clear-scroll round-trips are stubbed on
 * the mocked client, and the store is expected to return one parsed
 * triggered watch per watch passed in.
 */
public void testFindTriggeredWatchesGoodCase() {
    // Build a cluster state with the triggered-watches index present and its
    // single shard started, so TriggeredWatchStore.validate(cs) succeeds.
    ClusterState.Builder csBuilder = new ClusterState.Builder(new ClusterName("_name"));
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
    Metadata.Builder metadataBuilder = Metadata.builder();
    metadataBuilder.put(IndexMetadata.builder(TriggeredWatchStoreField.INDEX_NAME).settings(indexSettings));
    final Index index = metadataBuilder.get(TriggeredWatchStoreField.INDEX_NAME).getIndex();
    IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index);
    ShardId shardId = new ShardId(index, 0);
    indexRoutingTableBuilder.addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(TestShardRouting.newShardRouting(shardId, "_node_id", null, true, ShardRoutingState.STARTED)));
    indexRoutingTableBuilder.addReplica(ShardRouting.Role.DEFAULT);
    routingTableBuilder.add(indexRoutingTableBuilder.build());
    csBuilder.metadata(metadataBuilder);
    csBuilder.routingTable(routingTableBuilder.build());
    ClusterState cs = csBuilder.build();
    // Stub the refresh call to report one successful shard.
    // NOTE: argument index 2 is the ActionListener in client.execute(action, request, listener).
    doAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        ActionListener<BroadcastResponse> listener = (ActionListener<BroadcastResponse>) invocation.getArguments()[2];
        listener.onResponse(mockRefreshResponse(1, 1));
        return null;
    }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any());
    // First (non-scroll) search returns one hit ("first_foo") and a scroll id.
    SearchResponse searchResponse1 = mock(SearchResponse.class);
    when(searchResponse1.getSuccessfulShards()).thenReturn(1);
    when(searchResponse1.getTotalShards()).thenReturn(1);
    BytesArray source = new BytesArray("{}");
    {
        SearchHit hit = SearchHit.unpooled(0, "first_foo");
        hit.version(1L);
        hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null));
        hit.sourceRef(source);
        when(searchResponse1.getHits()).thenReturn(SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f));
    }
    when(searchResponse1.getScrollId()).thenReturn("_scrollId");
    doAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        ActionListener<SearchResponse> listener = (ActionListener<SearchResponse>) invocation.getArguments()[2];
        listener.onResponse(searchResponse1);
        return null;
    }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any());
    // Scroll behavior keyed on the scroll id:
    //   "_scrollId"  -> one more hit ("second_foo"), next id "_scrollId1"
    //   "_scrollId1" -> empty page (terminates the scroll)
    //   anything else -> failure (would indicate a bug in scroll chaining)
    doAnswer(invocation -> {
        SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[1];
        @SuppressWarnings("unchecked")
        ActionListener<SearchResponse> listener = (ActionListener<SearchResponse>) invocation.getArguments()[2];
        if (request.scrollId().equals("_scrollId")) {
            var hit = SearchHit.unpooled(0, "second_foo");
            hit.version(1L);
            hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null));
            hit.sourceRef(source);
            ActionListener.respondAndRelease(listener, new SearchResponse(SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f), null, null, false, null, null, 1, "_scrollId1", 1, 1, 0, 1, null, null));
        } else if (request.scrollId().equals("_scrollId1")) {
            ActionListener.respondAndRelease(listener, SearchResponseUtils.emptyWithTotalHits("_scrollId2", 1, 1, 0, 1, null, null));
        } else {
            listener.onFailure(new ElasticsearchException("test issue"));
        }
        return null;
    }).when(client).execute(eq(TransportSearchScrollAction.TYPE), any(), any());
    TriggeredWatch triggeredWatch = mock(TriggeredWatch.class);
    when(parser.parse(eq("_id"), eq(1L), any(BytesReference.class))).thenReturn(triggeredWatch);
    // The store is expected to clean up its scroll when done.
    doAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        ActionListener<ClearScrollResponse> listener = (ActionListener<ClearScrollResponse>) invocation.getArguments()[2];
        listener.onResponse(new ClearScrollResponse(true, 1));
        return null;
    }).when(client).execute(eq(TransportClearScrollAction.TYPE), any(), any());
    assertThat(TriggeredWatchStore.validate(cs), is(true));
    // Map each hit id back to a concrete TriggeredWatch via the parser mock.
    ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC);
    ScheduleTriggerEvent triggerEvent = new ScheduleTriggerEvent(now, now);
    Watch watch1 = mock(Watch.class);
    when(watch1.id()).thenReturn("first");
    TriggeredWatch triggeredWatch1 = new TriggeredWatch(new Wid("first", now), triggerEvent);
    when(parser.parse(eq("first_foo"), anyLong(), eq(source))).thenReturn(triggeredWatch1);
    Watch watch2 = mock(Watch.class);
    when(watch2.id()).thenReturn("second");
    TriggeredWatch triggeredWatch2 = new TriggeredWatch(new Wid("second", now), triggerEvent);
    when(parser.parse(eq("second_foo"), anyLong(), eq(source))).thenReturn(triggeredWatch2);
    // Query for one or (randomly) both watches; the result size must match.
    Collection<Watch> watches = new ArrayList<>();
    watches.add(watch1);
    if (randomBoolean()) {
        watches.add(watch2);
    }
    Collection<TriggeredWatch> triggeredWatches = triggeredWatchStore.findTriggeredWatches(watches, cs);
    assertThat(triggeredWatches, notNullValue());
    assertThat(triggeredWatches, hasSize(watches.size()));
    // Verify the expected call pattern: one refresh, one search, two scroll
    // pages, one clear-scroll.
    verify(client, times(1)).execute(eq(RefreshAction.INSTANCE), any(), any());
    verify(client, times(1)).execute(eq(TransportSearchAction.TYPE), any(), any());
    verify(client, times(2)).execute(eq(TransportSearchScrollAction.TYPE), any(), any());
    verify(client, times(1)).execute(eq(TransportClearScrollAction.TYPE), any(), any());
}
292373.312299elasticsearch
/**
 * Checks that the cluster state reports per-node transport versions consistently with the
 * cluster's upgrade stage ({@code CLUSTER_TYPE}) and the features the cluster advertises.
 *
 * @return {@code true} when all assertions are conclusive; {@code false} when an upgraded
 *         cluster still reports an inferred transport version that has not been fixed up yet,
 *         signalling the caller to retry later
 * @throws Exception if the cluster state request fails
 */
private boolean runTransportVersionsTest() throws Exception {
    // Randomize the request shape (optional metric filter, optional ?local) to exercise
    // multiple server-side code paths with the same assertions.
    final var clusterState = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "/_cluster/state" + randomFrom("", "/nodes") + randomFrom("", "?local"))));
    // The full state dump is used as every assertion message to ease failure diagnosis.
    final var description = clusterState.toString();
    final var nodeIds = clusterState.evaluateMapKeys("nodes");
    // Map node id -> reported node version, consulted by the per-node checks below.
    final Map<String, String> versionsByNodeId = Maps.newHashMapWithExpectedSize(nodeIds.size());
    for (final var nodeId : nodeIds) {
        versionsByNodeId.put(nodeId, clusterState.evaluate("nodes." + nodeId + ".version"));
    }
    // The state may expose the legacy "transport_versions" section or its replacement
    // "nodes_versions" — never both at once.
    final var hasTransportVersions = clusterState.evaluate("transport_versions") != null;
    final var hasNodesVersions = clusterState.evaluate("nodes_versions") != null;
    assertFalse(description, hasNodesVersions && hasTransportVersions);
    switch(CLUSTER_TYPE) {
        case OLD ->
            {
                // Which section (if any) appears depends on the old version's feature set:
                // none before transport versions existed, "transport_versions" before the
                // rename, "nodes_versions" after it.
                if (clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED) == false) {
                    assertFalse(description, hasTransportVersions);
                    assertFalse(description, hasNodesVersions);
                } else if (clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION) == false) {
                    assertTrue(description, hasTransportVersions);
                    assertFalse(description, hasNodesVersions);
                } else {
                    assertFalse(description, hasTransportVersions);
                    assertTrue(description, hasNodesVersions);
                }
            }
        case MIXED ->
            {
                // In a mixed cluster the section present depends on which node answered, so
                // the middle case accepts either name.
                if (clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED) == false) {
                    assertFalse(description, hasTransportVersions);
                } else if (clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION) == false) {
                    assertTrue(description, hasNodesVersions || hasTransportVersions);
                } else {
                    assertFalse(description, hasTransportVersions);
                    assertTrue(description, hasNodesVersions);
                }
            }
        case UPGRADED ->
            {
                // Fully upgraded clusters must use nodes_versions and report the current build.
                assertFalse(description, hasTransportVersions);
                assertTrue(description, hasNodesVersions);
                assertThat(description, versionsByNodeId.values(), everyItem(equalTo(Build.current().version())));
            }
    }
    if (hasTransportVersions) {
        // Legacy section: only possible before the nodes_versions rename and never after a
        // full upgrade; must list exactly one entry per node.
        assertFalse(description, clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION));
        assertTrue(description, clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED));
        assertNotEquals(description, ClusterType.UPGRADED, CLUSTER_TYPE);
        assertEquals(description, nodeIds.size(), clusterState.evaluateArraySize("transport_versions"));
        for (int i = 0; i < nodeIds.size(); i++) {
            final var path = "transport_versions." + i;
            final String nodeId = clusterState.evaluate(path + ".node_id");
            final var nodeDescription = nodeId + "/" + description;
            final var transportVersion = TransportVersion.fromString(clusterState.evaluate(path + ".transport_version"));
            final var nodeVersion = versionsByNodeId.get(nodeId);
            assertNotNull(nodeDescription, nodeVersion);
            if (nodeVersion.equals(Build.current().version())) {
                // Current-build nodes must report exactly the current transport version.
                assertEquals(nodeDescription, TransportVersion.current(), transportVersion);
            } else {
                // Older nodes may lag, but never below the inferred floor.
                assertThat(nodeDescription, transportVersion, greaterThanOrEqualTo(INFERRED_TRANSPORT_VERSION));
            }
        }
    } else if (hasNodesVersions) {
        // nodes_versions must not appear on an OLD cluster that predates the rename.
        assertFalse(description, clusterHasFeature(RestTestLegacyFeatures.STATE_REPLACED_TRANSPORT_VERSION_WITH_NODES_VERSION) == false && CLUSTER_TYPE == ClusterType.OLD);
        assertEquals(description, nodeIds.size(), clusterState.evaluateArraySize("nodes_versions"));
        for (int i = 0; i < nodeIds.size(); i++) {
            final var path = "nodes_versions." + i;
            final String nodeId = clusterState.evaluate(path + ".node_id");
            final var nodeDescription = nodeId + "/" + description;
            final var transportVersion = TransportVersion.fromString(clusterState.evaluate(path + ".transport_version"));
            final var nodeVersion = versionsByNodeId.get(nodeId);
            assertNotNull(nodeDescription, nodeVersion);
            if (nodeVersion.equals(Build.current().version())) {
                assertThat(nodeDescription, transportVersion, clusterHasFeature(RestTestLegacyFeatures.TRANSPORT_VERSION_SUPPORTED) ? equalTo(TransportVersion.current()) : oneOf(TransportVersion.current(), INFERRED_TRANSPORT_VERSION));
                if (CLUSTER_TYPE == ClusterType.UPGRADED && transportVersion.equals(INFERRED_TRANSPORT_VERSION)) {
                    // The master has not yet fixed up the inferred version: ask the caller
                    // to retry rather than failing the test.
                    logger.info("{} - not fixed up yet, retrying", nodeDescription);
                    return false;
                }
            } else {
                // For genuinely older nodes, decide the expected transport version from the
                // node's parsed legacy version (unparseable versions are treated as newer).
                var version = parseLegacyVersion(nodeVersion);
                var transportVersionIntroduced = version.map(v -> v.after(VERSION_INTRODUCING_TRANSPORT_VERSIONS)).orElse(true);
                if (transportVersionIntroduced) {
                    assertThat(nodeDescription, transportVersion, greaterThan(INFERRED_TRANSPORT_VERSION));
                } else {
                    // Pre-transport-version nodes report a transport version equal to their
                    // release version id.
                    assertEquals(nodeDescription, TransportVersion.fromId(version.get().id()), transportVersion);
                }
            }
        }
    }
    return true;
}
293195.691126gwt
/**
 * Verifies that the JsTypeLinker keeps only reachable types in the linked JS output, orders a
 * superclass's statements before its subclass's, and keeps the source-info map (type names and
 * line count) in sync — across three recompiles with changing type relations.
 */
public void testLink() {
    // Named ranges marking where each type's statements live in the generated JS.
    NamedRange programRange = new NamedRange("Program");
    NamedRange someModelARange = new NamedRange("com.some.app.SomeAModel");
    NamedRange someModelBRange = new NamedRange("com.some.app.SomeBModel");
    NamedRange someControllerRange = new NamedRange("com.some.app.SomeController");
    NamedRange entryPointRange = new NamedRange("com.some.app.EntryPoint");
    List<NamedRange> classRanges = Lists.newArrayList(someModelARange, someModelBRange, someControllerRange, entryPointRange);
    // Build a synthetic JS file: preamble, one statement per type, epilogue.
    StatementRangesBuilder srb = new StatementRangesBuilder();
    JsSourceMapBuilder smb = new JsSourceMapBuilder();
    StringBuilder sb = new StringBuilder();
    appendStatement(sb, srb, smb, "<preamble>\n");
    appendStatement(sb, srb, smb, "<java.lang.Object />\n");
    appendStatement(sb, srb, smb, "<java.lang.Class />\n");
    appendStatement(sb, srb, smb, "</preamble>\n");
    {
        programRange.setStartPosition(sb.length());
        programRange.setStartLineNumber(lines);
        appendTypeStatement(sb, srb, smb, someModelARange, "<com.some.app.SomeModelA>\n");
        appendTypeStatement(sb, srb, smb, someModelBRange, "<com.some.app.SomeModelB>\n");
        appendTypeStatement(sb, srb, smb, someControllerRange, "<com.some.app.SomeController>\n");
        appendTypeStatement(sb, srb, smb, entryPointRange, "<com.some.app.EntryPoint>\n");
        programRange.setEndPosition(sb.length());
        programRange.setEndLineNumber(lines);
    }
    appendStatement(sb, srb, smb, "<epilogue>\n");
    appendStatement(sb, srb, smb, "<Some Bootstrap Code>\n");
    appendStatement(sb, srb, smb, "</epilogue>\n");
    String originalJs = sb.toString();
    // Record type relations and method-level control flow so that the control-flow analysis
    // sees EntryPoint -> SomeController -> {SomeBModel, SomeAModel} as reachable.
    MinimalRebuildCache minimalRebuildCache = new MinimalRebuildCache();
    Map<String, String> superClassesByClass = minimalRebuildCache.getImmediateTypeRelations().getImmediateSuperclassesByClass();
    StringAnalyzableTypeEnvironment typeEnvironment = minimalRebuildCache.getTypeEnvironment();
    typeEnvironment.recordTypeEnclosesMethod("java.lang.Object", "java.lang.Object::$clinit()V");
    superClassesByClass.put("java.lang.Class", "java.lang.Object");
    typeEnvironment.recordTypeEnclosesMethod("java.lang.Class", "java.lang.Class::$clinit()V");
    superClassesByClass.put("com.some.app.SomeAModel", "java.lang.Object");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeAModel", "com.some.app.SomeAModel::$clinit()V");
    superClassesByClass.put("com.some.app.SomeBModel", "java.lang.Object");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeBModel", "com.some.app.SomeBModel::$clinit()V");
    superClassesByClass.put("com.some.app.SomeController", "java.lang.Object");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeController", "com.some.app.SomeController::$clinit()V");
    superClassesByClass.put("com.some.app.EntryPoint", "java.lang.Object");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.EntryPoint", "com.some.app.EntryPoint::$clinit()V");
    minimalRebuildCache.setRootTypeNames(Lists.newArrayList("com.some.app.EntryPoint"));
    minimalRebuildCache.setEntryMethodNames(Lists.newArrayList("com.some.app.EntryPoint::onModuleLoad()V"));
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.EntryPoint", "com.some.app.EntryPoint::onModuleLoad()V");
    minimalRebuildCache.addTypeReference("com.some.app.EntryPoint", "com.some.app.SomeController");
    typeEnvironment.recordMethodInstantiatesType("com.some.app.EntryPoint::onModuleLoad()V", "com.some.app.SomeController");
    typeEnvironment.recordMethodCallsMethod("com.some.app.EntryPoint::onModuleLoad()V", "com.some.app.SomeController::createData()V");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeController", "com.some.app.SomeController::createData()V");
    minimalRebuildCache.addTypeReference("com.some.app.SomeController", "com.some.app.SomeBModel");
    typeEnvironment.recordMethodInstantiatesType("com.some.app.SomeController::createData()V", "com.some.app.SomeBModel");
    typeEnvironment.recordMethodCallsMethod("com.some.app.SomeController::createData()V", "com.some.app.SomeBModel::SomeBModel()V");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeBModel", "com.some.app.SomeBModel::SomeBModel()V");
    minimalRebuildCache.addTypeReference("com.some.app.SomeController", "com.some.app.SomeAModel");
    typeEnvironment.recordMethodInstantiatesType("com.some.app.SomeController::createData()V", "com.some.app.SomeAModel");
    typeEnvironment.recordMethodCallsMethod("com.some.app.SomeController::createData()V", "com.some.app.SomeAModel::SomeAModel()V");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeAModel", "com.some.app.SomeAModel::SomeAModel()V");
    // 1) All four app types are live; A precedes B since both directly extend Object.
    assertLinkedJs(originalJs, srb, smb, classRanges, programRange, minimalRebuildCache, "<preamble>\n<java.lang.Object />\n<java.lang.Class />\n</preamble>\n" + "<com.some.app.EntryPoint>\n" + "<com.some.app.SomeModelA>\n" + "<com.some.app.SomeModelB>\n" + "<com.some.app.SomeController>\n" + "<epilogue>\n<Some Bootstrap Code>\n</epilogue>\n", Lists.newArrayList("preamble", "java.lang.Object", "java.lang.Class", "/preamble", "com.some.app.EntryPoint", "com.some.app.SomeModelA", "com.some.app.SomeModelB", "com.some.app.SomeController", "epilogue", "Some Bootstrap Code", "/epilogue"), 11);
    // 2) Make B the superclass of A: B's statements must now precede A's.
    superClassesByClass.put("com.some.app.SomeAModel", "com.some.app.SomeBModel");
    assertLinkedJs(originalJs, srb, smb, classRanges, programRange, minimalRebuildCache, "<preamble>\n<java.lang.Object />\n<java.lang.Class />\n</preamble>\n" + "<com.some.app.EntryPoint>\n" + "<com.some.app.SomeModelB>\n" + "<com.some.app.SomeModelA>\n" + "<com.some.app.SomeController>\n" + "<epilogue>\n<Some Bootstrap Code>\n</epilogue>\n", Lists.newArrayList("preamble", "java.lang.Object", "java.lang.Class", "/preamble", "com.some.app.EntryPoint", "com.some.app.SomeModelB", "com.some.app.SomeModelA", "com.some.app.SomeController", "epilogue", "Some Bootstrap Code", "/epilogue"), 11);
    // 3) Drop the controller's reference to A; A becomes unreachable and must be pruned.
    minimalRebuildCache.removeReferencesFrom("com.some.app.SomeController");
    minimalRebuildCache.addTypeReference("com.some.app.SomeController", "com.some.app.SomeBModel");
    typeEnvironment.removeControlFlowIndexesFor("com.some.app.SomeController");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeController", "com.some.app.SomeController::createData()V");
    typeEnvironment.recordTypeEnclosesMethod("com.some.app.SomeController", "com.some.app.SomeController::$clinit()V");
    typeEnvironment.recordMethodInstantiatesType("com.some.app.SomeController::createData()V", "com.some.app.SomeBModel");
    typeEnvironment.recordMethodCallsMethod("com.some.app.SomeController::createData()V", "com.some.app.SomeBModel::SomeBModel()V");
    assertLinkedJs(originalJs, srb, smb, classRanges, programRange, minimalRebuildCache, "<preamble>\n<java.lang.Object />\n<java.lang.Class />\n</preamble>\n" + "<com.some.app.EntryPoint>\n" + "<com.some.app.SomeModelB>\n" + "<com.some.app.SomeController>\n" + "<epilogue>\n<Some Bootstrap Code>\n</epilogue>\n", Lists.newArrayList("preamble", "java.lang.Object", "java.lang.Class", "/preamble", "com.some.app.EntryPoint", "com.some.app.SomeModelB", "com.some.app.SomeController", "epilogue", "Some Bootstrap Code", "/epilogue"), 10);
}

/**
 * Runs a fresh JsTypeLinker over {@code originalJs} with the given ranges and cache, then
 * asserts the linked JS text, the type names recorded in the resulting source-info map, and
 * the mapped line count.
 */
private void assertLinkedJs(String originalJs, StatementRangesBuilder srb, JsSourceMapBuilder smb, List<NamedRange> classRanges, NamedRange programRange, MinimalRebuildCache minimalRebuildCache, String expectedJs, List<String> expectedTypeNames, int expectedLines) {
    JsTypeLinker jsTypeLinker = new JsTypeLinker(TreeLogger.NULL, new JsNoopTransformer(originalJs, srb.build(), smb.build()), classRanges, programRange, minimalRebuildCache, new JTypeOracle(null, minimalRebuildCache));
    jsTypeLinker.exec();
    assertEquals(expectedJs, jsTypeLinker.getJs());
    assertEquals(expectedTypeNames, getTypeNames(jsTypeLinker.getSourceInfoMap()));
    assertEquals(expectedLines, jsTypeLinker.getSourceInfoMap().getLines());
}
291727.5823111gwt
/**
 * Compares methods of the given type (method/constructor) whose names appear in both the old
 * and the new API class, recording per-method API changes in {@code intersectingMethods} and
 * adding old methods with no compatible counterpart to {@code missingMethods}.
 *
 * <p>Fix: removed the dead locals {@code onlyInNew} and {@code commonSignature} (and the
 * {@code signatureInExisting} temporary) — they were populated but never read.
 *
 * @param intersection method names present in both the old and the new class
 * @param methodType whether plain methods or constructors are being compared
 */
private void processElementsInIntersection(Set<String> intersection, ApiClass.MethodType methodType) {
    Set<ApiAbstractMethod> missingElements = missingMethods.get(methodType);
    Map<ApiAbstractMethod, Set<ApiChange>> intersectingElements = intersectingMethods.get(methodType);
    // Old methods for which no compatible new overload was found; flushed into
    // missingElements at the end.
    Set<ApiAbstractMethod> onlyInExisting = new HashSet<ApiAbstractMethod>();
    for (String elementName : intersection) {
        Set<ApiAbstractMethod> methodsInNew = newClass.getApiMethodsByName(elementName, methodType);
        Set<ApiAbstractMethod> methodsInExisting = oldClass.getApiMethodsByName(elementName, methodType);
        onlyInExisting.addAll(methodsInExisting);
        // Record incompatibilities caused purely by overload resolution changes.
        Map<ApiAbstractMethod, ApiChange> incompatibilityMap = getOverloadedMethodIncompatibility(methodsInNew, methodsInExisting);
        for (Map.Entry<ApiAbstractMethod, ApiChange> entry : incompatibilityMap.entrySet()) {
            addProperty(intersectingElements, entry.getKey(), entry.getValue());
        }
        // For each old method, collect the API changes against its compatible new overloads.
        // An exact-signature match takes precedence over merely-compatible overloads.
        for (ApiAbstractMethod methodInExisting : methodsInExisting) {
            Set<ApiChange> allPossibleApiChanges = new HashSet<ApiChange>();
            ApiAbstractMethod sameSignatureMethod = null;
            for (ApiAbstractMethod methodInNew : methodsInNew) {
                Set<ApiChange> currentApiChange = new HashSet<ApiChange>();
                boolean hasSameSignature = false;
                if (methodInExisting.isCompatible(methodInNew)) {
                    if (methodInExisting.isOverridable()) {
                        currentApiChange.addAll(methodInExisting.getAllChangesInApi(methodInNew));
                    } else {
                        currentApiChange.addAll(methodInExisting.checkExceptionsAndReturnType(methodInNew));
                    }
                    for (ApiChange.Status status : methodInExisting.getModifierChanges(methodInNew)) {
                        currentApiChange.add(new ApiChange(methodInExisting, status));
                    }
                    if (methodInNew.getInternalSignature().equals(methodInExisting.getInternalSignature())) {
                        currentApiChange.add(new ApiChange(methodInExisting, ApiChange.Status.COMPATIBLE));
                        hasSameSignature = true;
                    } else {
                        currentApiChange.add(new ApiChange(methodInExisting, ApiChange.Status.COMPATIBLE_WITH, methodInNew.getApiSignature()));
                    }
                }
                if (currentApiChange.size() > 0) {
                    if (hasSameSignature) {
                        // Exact match wins: replace whatever compatible-overload changes
                        // were accumulated so far.
                        allPossibleApiChanges = currentApiChange;
                        sameSignatureMethod = methodInNew;
                    } else if (sameSignatureMethod == null) {
                        allPossibleApiChanges.addAll(currentApiChange);
                    }
                }
            }
            if (allPossibleApiChanges.size() > 0) {
                // A compatible counterpart exists, so this old method is not missing.
                onlyInExisting.remove(methodInExisting);
                for (ApiChange apiChange : allPossibleApiChanges) {
                    addProperty(intersectingElements, methodInExisting, apiChange);
                }
            }
        }
        // For new methods without an exact-signature twin, also record return-type and
        // exception changes against every compatible old method.
        for (ApiAbstractMethod methodInNew : methodsInNew) {
            ApiAbstractMethod sameSignatureMethod = null;
            for (ApiAbstractMethod methodInExisting : methodsInExisting) {
                if (methodInNew.getInternalSignature().equals(methodInExisting.getInternalSignature())) {
                    sameSignatureMethod = methodInExisting;
                    break;
                }
            }
            if (sameSignatureMethod != null) {
                continue;
            }
            for (ApiAbstractMethod methodInExisting : methodsInExisting) {
                if (methodInNew.isCompatible(methodInExisting)) {
                    for (ApiChange apiChange : methodInExisting.checkExceptionsAndReturnType(methodInNew)) {
                        addProperty(intersectingElements, methodInExisting, apiChange);
                    }
                }
            }
        }
    }
    missingElements.addAll(onlyInExisting);
}
292624.5710113gwt
/**
 * Generates (or reuses from cache) the client-side proxy for the remote service interface,
 * returning a RebindResult whose mode says how much cached output was reused.
 *
 * @param logger logger for progress and error reporting
 * @param context generator context providing type, property and artifact access
 * @return the rebind result naming the generated proxy type
 * @throws UnableToCompleteException if the async interface is missing, a required
 *         configuration property is undefined, or validation fails
 */
public RebindResult create(TreeLogger logger, GeneratorContext context) throws UnableToCompleteException {
    TypeOracle typeOracle = context.getTypeOracle();
    // The companion async interface is located purely by naming convention ("<Intf>Async").
    JClassType serviceAsync = typeOracle.findType(serviceIntf.getQualifiedSourceName() + "Async");
    if (serviceAsync == null) {
        logger.branch(TreeLogger.ERROR, "Could not find an asynchronous version for the service interface " + serviceIntf.getQualifiedSourceName(), null);
        RemoteServiceAsyncValidator.logValidAsyncInterfaceDeclaration(logger, serviceIntf);
        throw new UnableToCompleteException();
    }
    // Fast path: the proxy was already generated in this compile.
    if (checkAlreadyGenerated(typeOracle, serviceIntf)) {
        return new RebindResult(RebindMode.USE_EXISTING, getProxyQualifiedName());
    }
    // Validate that every sync method has a matching async method; keep the mapping for
    // proxy-method generation below.
    RemoteServiceAsyncValidator rsav = new RemoteServiceAsyncValidator(logger, typeOracle);
    Map<JMethod, JMethod> syncMethToAsyncMethMap = rsav.validate(logger, serviceIntf, serviceAsync);
    final PropertyOracle propertyOracle = context.getPropertyOracle();
    TypeFilter blacklistTypeFilter = new BlacklistTypeFilter(logger, propertyOracle);
    // Time the serializable-type analysis with SpeedTracer; event.end() in finally keeps the
    // timing record balanced even if the analysis throws.
    Event event = SpeedTracerLogger.start(CompilerEventType.GENERATOR_RPC_STOB);
    SerializableTypeOracle typesSentFromBrowser;
    SerializableTypeOracle typesSentToBrowser;
    String rpcLog = null;
    try {
        SerializableTypeOracleBuilder typesSentFromBrowserBuilder = new SerializableTypeOracleBuilder(logger, context);
        typesSentFromBrowserBuilder.setTypeFilter(blacklistTypeFilter);
        SerializableTypeOracleBuilder typesSentToBrowserBuilder = new SerializableTypeOracleBuilder(logger, context);
        typesSentToBrowserBuilder.setTypeFilter(blacklistTypeFilter);
        addRoots(logger, typeOracle, typesSentFromBrowserBuilder, typesSentToBrowserBuilder);
        {
            // When debug logging is on, capture the analysis log so it can be committed as an
            // RpcLogArtifact after the proxy is written; otherwise just build the oracles.
            if (logger.isLoggable(TreeLogger.Type.DEBUG)) {
                StringWriter stringWriter = new StringWriter();
                PrintWriter writer = new PrintWriter(stringWriter);
                typesSentFromBrowserBuilder.setLogOutputWriter(writer);
                typesSentToBrowserBuilder.setLogOutputWriter(writer);
                writer.write("====================================\n");
                writer.write("Types potentially sent from browser:\n");
                writer.write("====================================\n\n");
                writer.flush();
                typesSentFromBrowser = typesSentFromBrowserBuilder.build(logger);
                writer.write("===================================\n");
                writer.write("Types potentially sent from server:\n");
                writer.write("===================================\n\n");
                writer.flush();
                typesSentToBrowser = typesSentToBrowserBuilder.build(logger);
                writer.close();
                rpcLog = stringWriter.toString();
            } else {
                typesSentFromBrowser = typesSentFromBrowserBuilder.build(logger);
                typesSentToBrowser = typesSentToBrowserBuilder.build(logger);
            }
        }
    } finally {
        event.end();
    }
    // If a cached result matches the freshly computed serializable-type oracles, reuse it all.
    if (checkCachedGeneratorResultValid(logger, context, typesSentFromBrowser, typesSentToBrowser)) {
        logger.log(TreeLogger.TRACE, "Reusing all cached artifacts for " + getProxyQualifiedName());
        return new RebindResult(RebindMode.USE_ALL_CACHED, getProxyQualifiedName());
    }
    try {
        ConfigurationProperty prop = context.getPropertyOracle().getConfigurationProperty(TypeSerializerCreator.GWT_ELIDE_TYPE_NAMES_FROM_RPC);
        elideTypeNames = Boolean.parseBoolean(prop.getValues().get(0));
    } catch (BadPropertyValueException e) {
        // The property is declared in RemoteService.gwt.xml; its absence means the module
        // inheritance is broken, which is fatal for RPC generation.
        logger.log(TreeLogger.ERROR, "Configuration property " + TypeSerializerCreator.GWT_ELIDE_TYPE_NAMES_FROM_RPC + " is not defined. Is RemoteService.gwt.xml inherited?");
        throw new UnableToCompleteException();
    }
    SourceWriter srcWriter = getSourceWriter(logger, context, serviceAsync);
    // A null writer means the source file already exists for this permutation.
    if (srcWriter == null) {
        return new RebindResult(RebindMode.USE_EXISTING, getProxyQualifiedName());
    }
    generateTypeHandlers(logger, context, typesSentFromBrowser, typesSentToBrowser);
    String serializationPolicyStrongName = writeSerializationPolicyFile(logger, context, typesSentFromBrowser, typesSentToBrowser);
    String remoteServiceInterfaceName = elideTypeNames ? TypeNameObfuscator.SERVICE_INTERFACE_ID : SerializationUtils.getRpcTypeName(serviceIntf);
    generateProxyFields(srcWriter, typesSentFromBrowser, serializationPolicyStrongName, remoteServiceInterfaceName);
    generateProxyContructor(srcWriter);
    generateProxyMethods(srcWriter, typesSentFromBrowser, typeOracle, syncMethToAsyncMethMap);
    generateStreamWriterOverride(srcWriter);
    generateCheckRpcTokenTypeOverride(srcWriter, typeOracle, typesSentFromBrowser);
    srcWriter.commit(logger);
    if (rpcLog != null) {
        // Only produced when debug logging was enabled above.
        context.commitArtifact(logger, new RpcLogArtifact(serviceIntf.getQualifiedSourceName(), serializationPolicyStrongName, rpcLog));
    }
    if (checkGeneratorResultCacheability(context)) {
        // Stash the type/property fingerprints so a later compile can detect reusability.
        RebindResult result = new RebindResult(RebindMode.USE_PARTIAL_CACHED, getProxyQualifiedName());
        CachedRpcTypeInformation cti = new CachedRpcTypeInformation(typesSentFromBrowser, typesSentToBrowser, customSerializersUsed, typesNotUsingCustomSerializers);
        CachedPropertyInformation cpi = new CachedPropertyInformation(logger, context.getPropertyOracle(), selectionPropsToCheck, configPropsToCheck);
        result.putClientData(CACHED_TYPE_INFO_KEY, cti);
        result.putClientData(CACHED_PROPERTY_INFO_KEY, cpi);
        return result;
    } else {
        return new RebindResult(RebindMode.USE_ALL_NEW_WITH_NO_CACHING, getProxyQualifiedName());
    }
}
292156.418117gwt
/**
 * Emits the generated-source validate method for a single bean property, either for its field
 * or for its getter. The emitted method checks reachability/cascadability via the
 * TraversableResolver, recursively validates cascaded (@Valid) values, and then applies every
 * constraint whose groups are valid.
 *
 * <p>Fix: simplified the redundant condition {@code !hasField || (hasField && !...)} to the
 * equivalent {@code !hasField || !...} — when the right operand is evaluated, {@code hasField}
 * is already known to be true.
 *
 * @param sw writer receiving the generated source
 * @param p the property being validated
 * @param useField true to validate the backing field, false to validate via the getter
 * @throws UnableToCompleteException if a required bean helper cannot be created
 */
private void writeValidatePropertyMethod(SourceWriter sw, PropertyDescriptor p, boolean useField) throws UnableToCompleteException {
    Class<?> elementClass = p.getElementClass();
    JType elementType = beanHelper.getElementType(p, useField);
    // Method signature: field- and getter-based variants get distinct names.
    sw.print("private final <T> void ");
    if (useField) {
        sw.print(validateMethodFieldName(p));
    } else {
        sw.print(validateMethodGetterName(p));
    }
    sw.println("(");
    sw.indent();
    sw.indent();
    sw.println("final GwtValidationContext<T> context,");
    sw.println("final Set<ConstraintViolation<T>> violations,");
    sw.println(beanHelper.getTypeCanonicalName() + " object,");
    sw.print("final ");
    sw.print(elementType.getParameterizedQualifiedSourceName());
    sw.println(" value,");
    sw.println("boolean honorValid,");
    sw.println("Class<?>... groups) {");
    sw.outdent();
    // An unconstrained property still gets an (empty) method body so call sites compile.
    if (isPropertyConstrained(p, useField)) {
        // Append this property to the validation path and query reachability first.
        sw.print("final GwtValidationContext<T> myContext = context.append(\"");
        sw.print(p.getPropertyName());
        sw.println("\");");
        sw.println("Node leafNode = myContext.getPath().getLeafNode();");
        sw.println("PathImpl path = myContext.getPath().getPathWithoutLeafNode();");
        sw.println("boolean isReachable;");
        sw.println("try {");
        sw.indent();
        sw.println("isReachable = myContext.getTraversableResolver().isReachable(object, " + "leafNode, myContext.getRootBeanClass(), path, " + (useField ? asLiteral(ElementType.FIELD) : asLiteral(ElementType.METHOD)) + ");");
        sw.outdent();
        sw.println("} catch (Exception e) {");
        sw.indent();
        sw.println("throw new ValidationException(\"TraversableResolver isReachable caused an " + "exception\", e);");
        sw.outdent();
        sw.println("}");
        sw.println("if (isReachable) {");
        sw.indent();
        // Cascaded (@Valid) properties recurse into the referenced bean(s) when cascadable.
        if (p.isCascaded() && hasValid(p, useField)) {
            sw.println("if (honorValid && value != null) {");
            sw.indent();
            sw.println("boolean isCascadable;");
            sw.println("try {");
            sw.indent();
            sw.println("isCascadable = myContext.getTraversableResolver().isCascadable(object, " + "leafNode, myContext.getRootBeanClass(), path, " + (useField ? asLiteral(ElementType.FIELD) : asLiteral(ElementType.METHOD)) + ");");
            sw.outdent();
            sw.println("} catch (Exception e) {");
            sw.indent();
            sw.println("throw new ValidationException(\"TraversableResolver isCascadable caused an " + "exception\", e);");
            sw.outdent();
            sw.println("}");
            sw.println("if (isCascadable) {");
            sw.indent();
            if (isIterableOrMap(elementClass)) {
                // Collections/maps validate each contained element via the association helper.
                JClassType associationType = beanHelper.getAssociationType(p, useField);
                createBeanHelper(associationType);
                if (Map.class.isAssignableFrom(elementClass)) {
                    writeValidateMap(sw, p);
                } else {
                    writeValidateIterable(sw, p);
                }
            } else {
                createBeanHelper(elementClass);
                // Guard against infinite recursion on cyclic object graphs.
                sw.println(" if (!context.alreadyValidated(value)) {");
                sw.indent();
                sw.print("violations.addAll(");
                sw.println("myContext.getValidator().validate(myContext, value, groups));");
                sw.outdent();
                sw.println("}");
            }
            sw.outdent();
            sw.println("}");
            sw.outdent();
            sw.println("}");
        }
        // Emit one constraint check per descriptor, deduplicating annotations that appear on
        // both the field and the getter so each constraint is validated exactly once.
        Set<Object> includedAnnotations = Sets.newHashSet();
        int count = 0;
        for (ConstraintDescriptor<?> constraint : p.getConstraintDescriptors()) {
            if (areConstraintDescriptorGroupsValid(constraint)) {
                Object annotation = constraint.getAnnotation();
                if (hasMatchingAnnotation(p, useField, constraint)) {
                    String constraintDescriptorVar = constraintDescriptorVar(p.getPropertyName(), count);
                    if (!includedAnnotations.contains(annotation)) {
                        if (useField) {
                            writeValidateConstraint(sw, p, elementClass, constraint, constraintDescriptorVar);
                        } else {
                            // Getter-based: skip constraints already handled on the field.
                            boolean hasField = beanHelper.hasField(p);
                            if (!hasField || !hasMatchingAnnotation(p, true, constraint)) {
                                writeValidateConstraint(sw, p, elementClass, constraint, constraintDescriptorVar);
                            }
                        }
                    } else {
                        if (!useField) {
                            writeValidateConstraint(sw, p, elementClass, constraint, constraintDescriptorVar);
                        }
                    }
                    includedAnnotations.add(annotation);
                }
                count++;
            }
        }
        sw.outdent();
        sw.println("}");
    }
    sw.outdent();
    sw.println("}");
}
292935.871132gwt
/**
 * Tests increment/decrement and compound assignment operators applied to
 * {@code Integer} operands: each operation must unbox, compute, and rebox,
 * yielding a distinct boxed instance with the expected value.
 *
 * <p>The {@code assertSame} checks against int literals rely on the
 * JLS-mandated {@code Integer.valueOf} cache for values in [-128, 127];
 * the {@code >>>=} case produces {@code 0x7FFFFFFF}, which lies outside
 * that cache, so it is compared with {@code assertEquals} instead.
 */
public void testCompoundAssignmentsWithInteger() {
    {
        Integer operand, original, result;
        original = operand = 0;
        result = operand++;
        assertNotSame("[o++] original != operand, ", original, operand);
        assertSame("[o++] original == result, ", original, result);
        assertNotSame("[o++] result != operand, ", result, operand);
        assertSame("[o++] valueOf(n) == operand, ", 1, operand);
        assertEquals("[o++] n == operand.value, ", 1, operand.intValue());
    }
    {
        Integer operand, original, result;
        original = operand = 2;
        result = ++operand;
        assertNotSame("[++o] original != operand, ", original, operand);
        assertNotSame("[++o] original != result, ", original, result);
        assertSame("[++o] result == operand, ", result, operand);
        assertSame("[++o] valueOf(n) == operand, ", 3, operand);
        assertEquals("[++o] n == operand.value, ", 3, operand.intValue());
    }
    {
        Integer operand, original, result;
        original = operand = 5;
        result = operand--;
        assertNotSame("[o--] original != operand, ", original, operand);
        assertSame("[o--] original == result, ", original, result);
        assertNotSame("[o--] result != operand, ", result, operand);
        assertSame("[o--] valueOf(n) == operand, ", 4, operand);
        assertEquals("[o--] n == operand.value, ", 4, operand.intValue());
    }
    {
        Integer operand, original, result;
        original = operand = 7;
        result = --operand;
        assertNotSame("[--o] original != operand, ", original, operand);
        assertNotSame("[--o] original != result, ", original, result);
        assertSame("[--o] result == operand, ", result, operand);
        assertSame("[--o] valueOf(n) == operand, ", 6, operand);
        assertEquals("[--o] n == operand.value, ", 6, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 8;
        operand += 2;
        assertNotSame("[+=] original != operand, ", original, operand);
        assertSame("[+=] valueOf(n) == operand, ", 10, operand);
        assertEquals("[+=] n == operand.value, ", 10, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 11;
        operand -= 2;
        assertNotSame("[-=] original != operand, ", original, operand);
        assertSame("[-=] valueOf(n) == operand, ", 9, operand);
        assertEquals("[-=] n == operand.value, ", 9, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 21;
        operand *= 2;
        assertNotSame("[*=] original != operand, ", original, operand);
        assertSame("[*=] valueOf(n) == operand, ", 42, operand);
        assertEquals("[*=] n == operand.value, ", 42, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 30;
        operand /= 2;
        assertNotSame("[/=] original != operand, ", original, operand);
        assertSame("[/=] valueOf(n) == operand, ", 15, operand);
        assertEquals("[/=] n == operand.value, ", 15, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 123;
        operand %= 100;
        assertNotSame("[%=] original != operand, ", original, operand);
        assertSame("[%=] valueOf(n) == operand, ", 23, operand);
        assertEquals("[%=] n == operand.value, ", 23, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 0x55;
        operand &= 0xF;
        assertNotSame("[&=] original != operand, ", original, operand);
        assertSame("[&=] valueOf(n) == operand, ", 0x5, operand);
        assertEquals("[&=] n == operand.value, ", 0x5, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 0x55;
        operand |= 0xF;
        assertNotSame("[|=] original != operand, ", original, operand);
        assertSame("[|=] valueOf(n) == operand, ", 0x5F, operand);
        assertEquals("[|=] n == operand.value, ", 0x5F, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 0x55;
        operand ^= 0xF;
        // Labels fixed: these previously said "[&=]" (copy-paste from the
        // &= block), which produced misleading failure messages.
        assertNotSame("[^=] original != operand, ", original, operand);
        assertSame("[^=] valueOf(n) == operand, ", 0x5A, operand);
        assertEquals("[^=] n == operand.value, ", 0x5A, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = 0x3F;
        operand <<= 1;
        assertNotSame("[<<=] original != operand, ", original, operand);
        assertSame("[<<=] valueOf(n) == operand, ", 0x7E, operand);
        assertEquals("[<<=] n == operand.value, ", 0x7E, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = -16;
        operand >>= 1;
        assertNotSame("[>>=] original != operand, ", original, operand);
        assertSame("[>>=] valueOf(n) == operand, ", -8, operand);
        assertEquals("[>>=] n == operand.value, ", -8, operand.intValue());
    }
    {
        Integer operand, original;
        original = operand = -1;
        operand >>>= 1;
        assertNotSame("[>>>=] original != operand, ", original, operand);
        // 0x7FFFFFFF is outside the Integer.valueOf cache, so identity is
        // not guaranteed; compare by equality instead.
        assertEquals("[>>>=] valueOf(n).equals(operand), ", Integer.valueOf(0x7FFFFFFF), operand);
        assertEquals("[>>>=] n == operand.value, ", 0x7FFFFFFF, operand.intValue());
    }
}
295229.771103gwt
/**
 * Verifies every Document element-creator factory: each created element
 * must report the expected tag name and, for button and input creators,
 * the expected type attribute. Comparisons are case-insensitive via a
 * locale-neutral lower-casing (Locale.ROOT).
 */
public void testElementCreators() {
    Document doc = Document.get();
    // Tag names of the ordinary element creators.
    assertLowered("a", doc.createAnchorElement().getTagName());
    assertLowered("area", doc.createAreaElement().getTagName());
    assertLowered("base", doc.createBaseElement().getTagName());
    assertLowered("blockquote", doc.createBlockQuoteElement().getTagName());
    assertLowered("br", doc.createBRElement().getTagName());
    assertLowered("caption", doc.createCaptionElement().getTagName());
    assertLowered("col", doc.createColElement().getTagName());
    assertLowered("colgroup", doc.createColGroupElement().getTagName());
    assertLowered("del", doc.createDelElement().getTagName());
    assertLowered("div", doc.createDivElement().getTagName());
    assertLowered("dl", doc.createDLElement().getTagName());
    assertLowered("fieldset", doc.createFieldSetElement().getTagName());
    assertLowered("form", doc.createFormElement().getTagName());
    assertLowered("frame", doc.createFrameElement().getTagName());
    assertLowered("frameset", doc.createFrameSetElement().getTagName());
    assertLowered("head", doc.createHeadElement().getTagName());
    assertLowered("h1", doc.createHElement(1).getTagName());
    assertLowered("hr", doc.createHRElement().getTagName());
    assertLowered("iframe", doc.createIFrameElement().getTagName());
    assertLowered("img", doc.createImageElement().getTagName());
    assertLowered("ins", doc.createInsElement().getTagName());
    assertLowered("label", doc.createLabelElement().getTagName());
    assertLowered("legend", doc.createLegendElement().getTagName());
    assertLowered("li", doc.createLIElement().getTagName());
    assertLowered("link", doc.createLinkElement().getTagName());
    assertLowered("map", doc.createMapElement().getTagName());
    assertLowered("meta", doc.createMetaElement().getTagName());
    assertLowered("object", doc.createObjectElement().getTagName());
    assertLowered("ol", doc.createOLElement().getTagName());
    assertLowered("optgroup", doc.createOptGroupElement().getTagName());
    assertLowered("option", doc.createOptionElement().getTagName());
    assertLowered("param", doc.createParamElement().getTagName());
    assertLowered("p", doc.createPElement().getTagName());
    assertLowered("pre", doc.createPreElement().getTagName());
    assertLowered("q", doc.createQElement().getTagName());
    assertLowered("script", doc.createScriptElement().getTagName());
    assertLowered("select", doc.createSelectElement().getTagName());
    assertLowered("select", doc.createSelectElement(false).getTagName());
    assertLowered("span", doc.createSpanElement().getTagName());
    assertLowered("style", doc.createStyleElement().getTagName());
    assertLowered("table", doc.createTableElement().getTagName());
    assertLowered("tbody", doc.createTBodyElement().getTagName());
    assertLowered("td", doc.createTDElement().getTagName());
    assertLowered("textarea", doc.createTextAreaElement().getTagName());
    assertLowered("tfoot", doc.createTFootElement().getTagName());
    assertLowered("thead", doc.createTHeadElement().getTagName());
    assertLowered("th", doc.createTHElement().getTagName());
    assertLowered("title", doc.createTitleElement().getTagName());
    assertLowered("tr", doc.createTRElement().getTagName());
    assertLowered("ul", doc.createULElement().getTagName());
    // All three button creators produce <button> elements...
    assertLowered("button", doc.createPushButtonElement().getTagName());
    assertLowered("button", doc.createResetButtonElement().getTagName());
    assertLowered("button", doc.createSubmitButtonElement().getTagName());
    // ...distinguished by their type attribute.
    assertLowered("button", doc.createPushButtonElement().getType());
    assertLowered("reset", doc.createResetButtonElement().getType());
    assertLowered("submit", doc.createSubmitButtonElement().getType());
    // All input creators produce <input> elements...
    assertLowered("input", doc.createCheckInputElement().getTagName());
    assertLowered("input", doc.createFileInputElement().getTagName());
    assertLowered("input", doc.createHiddenInputElement().getTagName());
    assertLowered("input", doc.createImageInputElement().getTagName());
    assertLowered("input", doc.createPasswordInputElement().getTagName());
    assertLowered("input", doc.createRadioInputElement("foo").getTagName());
    assertLowered("input", doc.createTextInputElement().getTagName());
    // ...distinguished by their type attribute.
    assertLowered("button", doc.createButtonInputElement().getType());
    assertLowered("checkbox", doc.createCheckInputElement().getType());
    assertLowered("file", doc.createFileInputElement().getType());
    assertLowered("hidden", doc.createHiddenInputElement().getType());
    assertLowered("image", doc.createImageInputElement().getType());
    assertLowered("password", doc.createPasswordInputElement().getType());
    assertLowered("radio", doc.createRadioInputElement("foo").getType());
    assertLowered("reset", doc.createResetInputElement().getType());
    assertLowered("submit", doc.createSubmitInputElement().getType());
    assertLowered("text", doc.createTextInputElement().getType());
}

/**
 * Asserts that {@code actual}, lower-cased with the locale-neutral
 * {@code Locale.ROOT} mapping, equals {@code expected}.
 */
private void assertLowered(String expected, String actual) {
    assertEquals(expected, actual.toLowerCase(Locale.ROOT));
}
292194.2527100hadoop
/**
 * Command-line checker for Hadoop's native libraries.
 *
 * <p>Prints one status line per native library (hadoop, zlib, zstd, bzip2,
 * openssl, ISA-L, PMDK, and winutils on Windows) and terminates with exit
 * code 1 when the core hadoop native library (or winutils on Windows) is
 * missing, or — with {@code -a} — when any checked library failed to load.
 *
 * <p>Usage: {@code NativeLibraryChecker [-a|-h]}
 *
 * @param args optional single flag: {@code -a} to require all libraries,
 *             {@code -h} to print usage and exit
 */
public static void main(String[] args) {
    String usage = "NativeLibraryChecker [-a|-h]\n" + "  -a  use -a to check all libraries are available\n" + "      by default just check hadoop library (and\n" + "      winutils.exe on Windows OS) is available\n" + "      exit with error code 1 if check failed\n" + "  -h  print this message\n";
    // Anything other than no argument, -a, or -h is a usage error.
    if (args.length > 1 || (args.length == 1 && !(args[0].equals("-a") || args[0].equals("-h")))) {
        System.err.println(usage);
        ExitUtil.terminate(1);
    }
    boolean checkAll = false;
    if (args.length == 1) {
        if (args[0].equals("-h")) {
            System.out.println(usage);
            return;
        }
        checkAll = true;
    }
    Configuration conf = new Configuration();
    boolean nativeHadoopLoaded = NativeCodeLoader.isNativeCodeLoaded();
    boolean zlibLoaded = false;
    boolean isalLoaded = false;
    boolean zStdLoaded = false;
    boolean pmdkLoaded = false;
    boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf);
    boolean openSslLoaded = false;
    boolean winutilsExists = false;
    String openSslDetail = "";
    String hadoopLibraryName = "";
    String zlibLibraryName = "";
    String isalDetail = "";
    String pmdkDetail = "";
    String zstdLibraryName = "";
    String bzip2LibraryName = "";
    String winutilsPath = null;
    // Optional libraries are only probed when the core native library loaded.
    if (nativeHadoopLoaded) {
        hadoopLibraryName = NativeCodeLoader.getLibraryName();
        zlibLoaded = ZlibFactory.isNativeZlibLoaded(conf);
        if (zlibLoaded) {
            zlibLibraryName = ZlibFactory.getLibraryName();
        }
        zStdLoaded = NativeCodeLoader.buildSupportsZstd() && ZStandardCodec.isNativeCodeLoaded();
        // zStdLoaded already implies buildSupportsZstd(); re-testing it here
        // (as the previous code did) was redundant.
        if (zStdLoaded) {
            zstdLibraryName = ZStandardCodec.getLibraryName();
        }
        // For ISA-L, PMDK, and OpenSSL the "detail" string doubles as the
        // failure reason when loading failed, or the library name/path when
        // loading succeeded.
        isalDetail = ErasureCodeNative.getLoadingFailureReason();
        if (isalDetail != null) {
            isalLoaded = false;
        } else {
            isalDetail = ErasureCodeNative.getLibraryName();
            isalLoaded = true;
        }
        pmdkDetail = NativeIO.POSIX.getPmdkSupportStateMessage();
        pmdkLoaded = NativeIO.POSIX.isPmdkAvailable();
        if (pmdkLoaded) {
            pmdkDetail = NativeIO.POSIX.Pmem.getPmdkLibPath();
        }
        openSslDetail = OpensslCipher.getLoadingFailureReason();
        if (openSslDetail != null) {
            openSslLoaded = false;
        } else {
            openSslDetail = OpensslCipher.getLibraryName();
            openSslLoaded = true;
        }
        if (bzip2Loaded) {
            bzip2LibraryName = Bzip2Factory.getLibraryName(conf);
        }
    }
    if (Shell.WINDOWS) {
        // Resolve winutils.exe; on failure, carry the exception message as
        // the path detail. The result is printed once with the other
        // libraries below — the previous code also printed it here, which
        // produced a duplicate "winutils:" line on Windows.
        try {
            winutilsPath = Shell.getWinUtilsFile().getCanonicalPath();
            winutilsExists = true;
        } catch (IOException e) {
            LOG.debug("No Winutils: ", e);
            winutilsPath = e.getMessage();
            winutilsExists = false;
        }
    }
    System.out.println("Native library checking:");
    System.out.printf("hadoop:  %b %s%n", nativeHadoopLoaded, hadoopLibraryName);
    System.out.printf("zlib:    %b %s%n", zlibLoaded, zlibLibraryName);
    System.out.printf("zstd  :  %b %s%n", zStdLoaded, zstdLibraryName);
    System.out.printf("bzip2:   %b %s%n", bzip2Loaded, bzip2LibraryName);
    System.out.printf("openssl: %b %s%n", openSslLoaded, openSslDetail);
    System.out.printf("ISA-L:   %b %s%n", isalLoaded, isalDetail);
    System.out.printf("PMDK:    %b %s%n", pmdkLoaded, pmdkDetail);
    if (Shell.WINDOWS) {
        System.out.printf("winutils: %b %s%n", winutilsExists, winutilsPath);
    }
    // Fail when the core library (or winutils on Windows) is missing, or,
    // with -a, when any optional library failed to load.
    if ((!nativeHadoopLoaded) || (Shell.WINDOWS && (!winutilsExists)) || (checkAll && !(zlibLoaded && bzip2Loaded && isalLoaded && zStdLoaded))) {
        ExitUtil.terminate(1);
    }
}
293579.349103hadoop
/**
 * Exercises conversions between longs and traditional binary-prefix
 * strings (k/m/g/t/p/e, successive powers of 1024) in both directions,
 * plus the human-readable byte-count and percentage formatters in
 * StringUtils.
 *
 * @throws Exception declared for test-framework compatibility; the body
 *                   only throws runtime assertion failures
 */
public void testTraditionalBinaryPrefix() throws Exception {
    // string2long: each successive prefix scales by another factor of 1024.
    String[] symbol = { "k", "m", "g", "t", "p", "e" };
    long m = 1024;
    for (String s : symbol) {
        assertEquals(0, string2long(0 + s));
        assertEquals(m, string2long(1 + s));
        m *= 1024;
    }
    // Spot checks: plain numbers, negatives, and upper-case prefixes
    // (prefix matching is case-insensitive).
    assertEquals(0L, string2long("0"));
    assertEquals(1024L, string2long("1k"));
    assertEquals(-1024L, string2long("-1k"));
    assertEquals(1259520L, string2long("1230K"));
    assertEquals(-1259520L, string2long("-1230K"));
    assertEquals(104857600L, string2long("100m"));
    assertEquals(-104857600L, string2long("-100M"));
    assertEquals(956703965184L, string2long("891g"));
    assertEquals(-956703965184L, string2long("-891G"));
    assertEquals(501377302265856L, string2long("456t"));
    assertEquals(-501377302265856L, string2long("-456T"));
    assertEquals(11258999068426240L, string2long("10p"));
    assertEquals(-11258999068426240L, string2long("-10P"));
    assertEquals(1152921504606846976L, string2long("1e"));
    assertEquals(-1152921504606846976L, string2long("-1E"));
    // 10 * 2^60 overflows a signed long in either direction; both must be
    // rejected with a descriptive message.
    String tooLargeNumStr = "10e";
    try {
        string2long(tooLargeNumStr);
        fail("Test passed for a number " + tooLargeNumStr + " too large");
    } catch (IllegalArgumentException e) {
        assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
    }
    String tooSmallNumStr = "-10e";
    try {
        string2long(tooSmallNumStr);
        fail("Test passed for a number " + tooSmallNumStr + " too small");
    } catch (IllegalArgumentException e) {
        assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
    }
    // An unknown trailing character must be rejected as an invalid prefix.
    String invalidFormatNumStr = "10kb";
    char invalidPrefix = 'b';
    try {
        string2long(invalidFormatNumStr);
        fail("Test passed for a number " + invalidFormatNumStr + " has invalid format");
    } catch (IllegalArgumentException e) {
        assertEquals("Invalid size prefix '" + invalidPrefix + "' in '" + invalidFormatNumStr + "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)", e.getMessage());
    }
    // long2String: values below 1K are printed verbatim, with no prefix and
    // no decimal places regardless of the decimalPlace argument.
    assertEquals("0", long2String(0, null, 2));
    for (int decimalPlace = 0; decimalPlace < 2; decimalPlace++) {
        for (int n = 1; n < TraditionalBinaryPrefix.KILO.value; n++) {
            assertEquals(n + "", long2String(n, null, decimalPlace));
            assertEquals(-n + "", long2String(-n, null, decimalPlace));
        }
        assertEquals("1 K", long2String(1L << 10, null, decimalPlace));
        assertEquals("-1 K", long2String(-1L << 10, null, decimalPlace));
    }
    // Extremes: MAX_VALUE rounds up to "8.00 E"; MIN_VALUE (exactly -2^63)
    // prints as "-8 E" with no decimals, unlike MIN_VALUE + 1.
    assertEquals("8.00 E", long2String(Long.MAX_VALUE, null, 2));
    assertEquals("8.00 E", long2String(Long.MAX_VALUE - 1, null, 2));
    assertEquals("-8 E", long2String(Long.MIN_VALUE, null, 2));
    assertEquals("-8.00 E", long2String(Long.MIN_VALUE + 1, null, 2));
    // Boundary behavior around every power of two from 2^11 up: an exact
    // power prints with no fraction, while 2^e +/- 1 carries decimalPlace
    // trailing zeros (the +/-1 is lost to rounding at these magnitudes).
    final String[] zeros = { " ", ".0 ", ".00 " };
    for (int decimalPlace = 0; decimalPlace < zeros.length; decimalPlace++) {
        final String trailingZeros = zeros[decimalPlace];
        for (int e = 11; e < Long.SIZE - 1; e++) {
            // Prefix for this magnitude: KILO for e in [11,19], MEGA for
            // [20,29], etc. (values()[e/10 - 1]).
            final TraditionalBinaryPrefix p = TraditionalBinaryPrefix.values()[e / 10 - 1];
            {
                final long n = 1L << e;
                final String expected = (n / p.value) + " " + p.symbol;
                assertEquals("n=" + n, expected, long2String(n, null, 2));
            }
            {
                final long n = (1L << e) + 1;
                final String expected = (n / p.value) + trailingZeros + p.symbol;
                assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
            }
            {
                final long n = (1L << e) - 1;
                final String expected = ((n + 1) / p.value) + trailingZeros + p.symbol;
                assertEquals("n=" + n, expected, long2String(n, null, decimalPlace));
            }
        }
    }
    // Fractional values honor the requested number of decimal places
    // (3 << 9 == 1.5K, 3 << 19 == 1.5M, 3 << 30 == 3G exactly).
    assertEquals("1.50 K", long2String(3L << 9, null, 2));
    assertEquals("1.5 K", long2String(3L << 9, null, 1));
    assertEquals("1.50 M", long2String(3L << 19, null, 2));
    assertEquals("2 M", long2String(3L << 19, null, 0));
    assertEquals("3 G", long2String(3L << 30, null, 2));
    // byteDesc: human-readable byte counts with a "B" suffix.
    assertEquals("0 B", StringUtils.byteDesc(0));
    assertEquals("-100 B", StringUtils.byteDesc(-100));
    assertEquals("1 KB", StringUtils.byteDesc(1024));
    assertEquals("1.50 KB", StringUtils.byteDesc(3L << 9));
    assertEquals("1.50 MB", StringUtils.byteDesc(3L << 19));
    assertEquals("3 GB", StringUtils.byteDesc(3L << 30));
    // formatPercent: fraction formatted to the requested decimal places.
    assertEquals("10%", StringUtils.formatPercent(0.1, 0));
    assertEquals("10.0%", StringUtils.formatPercent(0.1, 1));
    assertEquals("10.00%", StringUtils.formatPercent(0.1, 2));
    assertEquals("1%", StringUtils.formatPercent(0.00543, 0));
    assertEquals("0.5%", StringUtils.formatPercent(0.00543, 1));
    assertEquals("0.54%", StringUtils.formatPercent(0.00543, 2));
    assertEquals("0.543%", StringUtils.formatPercent(0.00543, 3));
    assertEquals("0.5430%", StringUtils.formatPercent(0.00543, 4));
}
291968.4624106hadoop
/**
 * Services data-transfer operations arriving on this xceiver's peer
 * connection: negotiates the SASL/encryption handshake, then reads and
 * processes operations in a loop until the peer closes, a read times out,
 * or keepalive is disabled after the first op. Errors are logged with
 * severity chosen by operation/exception type; cleanup always closes the
 * peer and records metrics.
 */
public void run() {
    int opsProcessed = 0;
    Op op = null;
    Op firstOp = null;
    try {
        // Publish the thread servicing this xceiver; read elsewhere under
        // the same lock (assumed — the field's other uses are outside this
        // view).
        synchronized (this) {
            xceiver = Thread.currentThread();
        }
        dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
        peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
        InputStream input = socketIn;
        try {
            // Negotiate SASL/encryption; on success, replace the raw
            // streams with the negotiated (possibly wrapped) ones.
            IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut, socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
            input = new BufferedInputStream(saslStreams.in, smallBufferSize);
            socketOut = saslStreams.out;
        } catch (InvalidMagicNumberException imne) {
            // A bad magic number usually means an old client that cannot
            // speak encryption/SASL; log which and drop the connection.
            if (imne.isHandshake4Encryption()) {
                LOG.info("Failed to read expected encryption handshake from client " + "at {}. Perhaps the client " + "is running an older version of Hadoop which does not support " + "encryption", peer.getRemoteAddressString(), imne);
            } else {
                LOG.info("Failed to read expected SASL data transfer protection " + "handshake from client at {}" + ". Perhaps the client is running an older version of Hadoop " + "which does not support SASL data transfer protection", peer.getRemoteAddressString(), imne);
            }
            return;
        }
        super.initialize(new DataInputStream(input));
        do {
            updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
            try {
                // First op waits up to socketTimeout; subsequent ops use the
                // (shorter) keepalive timeout while idle between requests.
                if (opsProcessed != 0) {
                    assert dnConf.socketKeepaliveTimeout > 0;
                    peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
                } else {
                    peer.setReadTimeout(dnConf.socketTimeout);
                }
                op = readOp();
            } catch (InterruptedIOException ignored) {
                // Timed out while waiting for the next op on a kept-alive
                // connection: exit the loop normally.
                break;
            } catch (EOFException | ClosedChannelException e) {
                // Client closed the cached connection; benign.
                LOG.debug("Cached {} closing after {} ops.  " + "This message is usually benign.", peer, opsProcessed);
                break;
            } catch (IOException err) {
                incrDatanodeNetworkErrors();
                throw err;
            }
            // Restore the full socket timeout for processing the op itself.
            if (opsProcessed != 0) {
                peer.setReadTimeout(dnConf.socketTimeout);
            }
            opStartTime = monotonicNow();
            // Count read/write op metrics once per connection, keyed off the
            // first op seen; decremented in the finally block below.
            if (firstOp == null) {
                firstOp = op;
                incrReadWriteOpMetrics(op);
            }
            processOp(op);
            ++opsProcessed;
        } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
        String s = datanode.getDisplayName() + ":DataXceiver error processing " + ((op == null) ? "unknown" : op.name()) + " operation " + " src: " + remoteAddress + " dst: " + localAddress;
        // Downgrade log severity for expected/benign failure modes; only
        // genuinely unexpected errors are logged at ERROR.
        if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            } else {
                LOG.info("{}; {}", s, t.toString());
            }
        } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
            String s1 = "Likely the client has stopped reading, disconnecting it";
            s1 += " (" + s + ")";
            if (LOG.isTraceEnabled()) {
                LOG.trace(s1, t);
            } else {
                LOG.info("{}; {}", s1, t.toString());
            }
        } else if (t instanceof InvalidToken || t.getCause() instanceof InvalidToken) {
            LOG.trace(s, t);
        } else {
            LOG.error(s, t);
        }
    } finally {
        collectThreadLocalStates();
        LOG.debug("{}:Number of active connections is: {}", datanode.getDisplayName(), datanode.getXceiverCount());
        updateCurrentThreadName("Cleaning up");
        if (peer != null) {
            // Undo the per-connection metric increment made above. NOTE:
            // this passes the last op, not firstOp — presumably matching
            // incrReadWriteOpMetrics' keying; confirm against that helper.
            if (firstOp != null) {
                decrReadWriteOpMetrics(op);
            }
            dataXceiverServer.closePeer(peer);
            IOUtils.closeStream(in);
        }
    }
}
293016.6311115hadoop
/**
 * Handles an OP_REPLACE_BLOCK request: obtains the block either by moving
 * it across local storage (when this datanode is the proxy source) or by
 * copying it from the proxy source datanode over a new connection, then
 * notifies the namenode of the received replica.
 *
 * <p>Admission is gated by the balancer throttler; when the mover quota is
 * exceeded an ERROR response is sent and the request is dropped. The reply
 * status (SUCCESS, ERROR, or ERROR_BLOCK_PINNED) is always sent back to
 * the requester in the finally block, and all streams are closed there.
 *
 * @param block       block to replace/move
 * @param storageType target storage type for the new replica
 * @param blockToken  access token checked for REPLACE access
 * @param delHint     hint forwarded to the namenode for deleting the
 *                    over-replicated copy
 * @param proxySource datanode to copy the block from (may be this node)
 * @param storageId   target storage id, forwarded to access check and
 *                    receiver
 * @throws IOException if the copy or local move fails; network errors are
 *                     also counted in datanode metrics
 */
public void replaceBlock(final ExtendedBlock block, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String delHint, final DatanodeInfo proxySource, final String storageId) throws IOException {
    updateCurrentThreadName("Replacing block " + block + " from " + delHint);
    DataOutputStream replyOut = new DataOutputStream(getOutputStream());
    checkAccess(replyOut, true, block, blockToken, Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE, new StorageType[] { storageType }, new String[] { storageId });
    // Respect the balancer's concurrent-mover quota; refuse politely when
    // it is exhausted.
    if (!dataXceiverServer.balanceThrottler.acquire()) {
        String msg = "Not able to receive block " + block.getBlockId() + " from " + peer.getRemoteAddressString() + " because threads " + "quota=" + dataXceiverServer.balanceThrottler.getMaxConcurrentMovers() + " is exceeded.";
        LOG.warn(msg);
        sendResponse(ERROR, msg);
        return;
    }
    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    Status opStatus = SUCCESS;
    String errMsg = null;
    DataInputStream proxyReply = null;
    // Tracks whether an IOException happened during the copyBlock exchange
    // with the proxy (such failures are not counted as local network
    // errors). Renamed from IoeDuringCopyBlockOperation to follow Java
    // local-variable naming conventions.
    boolean ioeDuringCopyBlockOperation = false;
    try {
        if (proxySource.equals(datanode.getDatanodeId())) {
            // The source is this datanode: move the replica across local
            // storage types instead of copying over the network.
            ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block, storageType, storageId);
            if (oldReplica != null) {
                LOG.info("Moved {} from StorageType {} to {}", block, oldReplica.getVolume().getStorageType(), storageType);
            }
        } else {
            block.setNumBytes(dataXceiverServer.estimateBlockSize);
            final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
            LOG.debug("Connecting to datanode {}", dnAddr);
            InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
            proxySock = datanode.newSocket();
            NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
            proxySock.setTcpNoDelay(dnConf.getDataTransferServerTcpNoDelay());
            proxySock.setSoTimeout(dnConf.socketTimeout);
            proxySock.setKeepAlive(true);
            OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock, dnConf.socketWriteTimeout);
            InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
            DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
            // Negotiate SASL with the proxy and wrap the raw socket streams.
            IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock, unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
            unbufProxyOut = saslStreams.out;
            unbufProxyIn = saslStreams.in;
            proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut, smallBufferSize));
            proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn, ioFileBufferSize));
            ioeDuringCopyBlockOperation = true;
            new Sender(proxyOut).copyBlock(block, blockToken);
            ioeDuringCopyBlockOperation = false;
            BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(proxyReply));
            String logInfo = "copy block " + block + " from " + proxySock.getRemoteSocketAddress();
            DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo, true);
            ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
            DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(checksumInfo.getChecksum());
            // Receive the block body, throttled by the balancer throttler.
            setCurrentBlockReceiver(getBlockReceiver(block, storageType, proxyReply, proxySock.getRemoteSocketAddress().toString(), proxySock.getLocalSocketAddress().toString(), null, 0, 0, 0, "", null, datanode, remoteChecksum, CachingStrategy.newDropBehind(), false, false, storageId));
            blockReceiver.receiveBlock(null, null, replyOut, null, dataXceiverServer.balanceThrottler, null, true);
            final Replica r = blockReceiver.getReplica();
            datanode.notifyNamenodeReceivedBlock(block, delHint, r.getStorageUuid(), r.isOnTransientStorage());
            LOG.info("Moved {} from {}, delHint={}", block, peer.getRemoteAddressString(), delHint);
            datanode.metrics.incrReplaceBlockOpToOtherHost();
        }
    } catch (IOException ioe) {
        opStatus = ERROR;
        if (ioe instanceof BlockPinningException) {
            opStatus = Status.ERROR_BLOCK_PINNED;
        }
        errMsg = "opReplaceBlock " + block + " received exception " + ioe;
        LOG.info(errMsg);
        // Failures inside the copyBlock exchange are attributed to the
        // remote side, not counted as local datanode network errors.
        if (!ioeDuringCopyBlockOperation) {
            incrDatanodeNetworkErrors();
        }
        throw ioe;
    } finally {
        // On success, consume the proxy's trailing keepalive char (best
        // effort; failures here are deliberately ignored).
        if (opStatus == SUCCESS && proxyReply != null) {
            try {
                proxyReply.readChar();
            } catch (IOException ignored) {
            }
        }
        dataXceiverServer.balanceThrottler.release();
        try {
            sendResponse(opStatus, errMsg);
        } catch (IOException ioe) {
            LOG.warn("Error writing reply back to {}", peer.getRemoteAddressString());
            incrDatanodeNetworkErrors();
        }
        IOUtils.closeStream(proxyOut);
        IOUtils.closeStream(blockReceiver);
        IOUtils.closeStream(proxyReply);
        IOUtils.closeStream(replyOut);
    }
    datanode.metrics.addReplaceBlockOp(elapsed());
}
292260.0319106hadoop
/**
 * Writes {@code loopN} chunks of {@code chunkSize} bytes to {@code fname},
 * calling {@code hflush()} after every other chunk, and after each chunk
 * re-reads the file to verify that the byte count visible to a reader lies
 * between the flushed total (guaranteed visible) and the written total. A
 * final chunk is then written and the stream closed, after which every byte
 * written must be visible and must agree with the length reported by the
 * NameNode. If the file already exists it is truncated or appended to
 * depending on {@code truncateOption}.
 *
 * @param fname             file to write/read, resolved via
 *                          getFullyQualifiedPath
 * @param loopN             number of write/flush/read iterations
 * @param chunkSize         bytes written per iteration
 * @param readBeginPosition offset at which the verification read begins
 * @return the negated count of visibility-check failures (0 on success)
 * @throws IOException on any I/O failure, or on a visibility failure when
 *                     abortTestOnFailure is set
 */
private int testWriteAndRead(String fname, int loopN, int chunkSize, long readBeginPosition) throws IOException {
    int countOfFailures = 0;
    long byteVisibleToRead = 0;
    FSDataOutputStream out = null;
    byte[] outBuffer = new byte[BUFFER_SIZE];
    byte[] inBuffer = new byte[BUFFER_SIZE];
    // Fill the output buffer with a deterministic repeating byte pattern.
    for (int i = 0; i < BUFFER_SIZE; i++) {
        outBuffer[i] = (byte) (i & 0x00ff);
    }
    try {
        Path path = getFullyQualifiedPath(fname);
        long fileLengthBeforeOpen = 0;
        if (ifExists(path)) {
            if (truncateOption) {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.OVERWRITE)) : mfs.create(path, truncateOption);
                LOG.info("File already exists. File open with Truncate mode: " + path);
            } else {
                out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND)) : mfs.append(path);
                // When appending, the pre-existing bytes count as already
                // written and already visible.
                fileLengthBeforeOpen = getFileLengthFromNN(path);
                LOG.info("File already exists of size " + fileLengthBeforeOpen + " File open for Append mode: " + path);
            }
        } else {
            out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) : mfs.create(path);
        }
        long totalByteWritten = fileLengthBeforeOpen;
        long totalByteVisible = fileLengthBeforeOpen;
        long totalByteWrittenButNotVisible = 0;
        boolean toFlush;
        for (int i = 0; i < loopN; i++) {
            // hflush on even iterations only, so odd iterations accumulate
            // written-but-not-yet-guaranteed-visible bytes.
            toFlush = (i % 2) == 0;
            writeData(out, outBuffer, chunkSize);
            totalByteWritten += chunkSize;
            if (toFlush) {
                out.hflush();
                totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
                totalByteWrittenButNotVisible = 0;
            } else {
                totalByteWrittenButNotVisible += chunkSize;
            }
            if (verboseOption) {
                LOG.info("TestReadWrite - Written " + chunkSize + ". Total written = " + totalByteWritten + ". TotalByteVisible = " + totalByteVisible + " to file " + fname);
            }
            byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
            String readmsg = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
            // A reader must see at least the flushed bytes and at most the
            // written bytes.
            if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
                readmsg = "pass: reader sees expected number of visible byte. " + readmsg + " [pass]";
            } else {
                countOfFailures++;
                readmsg = "fail: reader see different number of visible byte. " + readmsg + " [fail]";
                if (abortTestOnFailure) {
                    throw new IOException(readmsg);
                }
            }
            LOG.info(readmsg);
        }
        // Final chunk: close() flushes everything, so after it every byte
        // written (including any not-yet-flushed remainder) is visible.
        writeData(out, outBuffer, chunkSize);
        totalByteWritten += chunkSize;
        totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
        // Reset the not-visible counter (the original "+= 0" was a no-op).
        totalByteWrittenButNotVisible = 0;
        out.close();
        // Null out so the finally block does not close the stream twice.
        out = null;
        byteVisibleToRead = readData(fname, inBuffer, totalByteVisible, readBeginPosition);
        String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible=" + totalByteVisible + " ; Got Visible=" + byteVisibleToRead + " of file " + fname;
        String readmsg;
        if (byteVisibleToRead >= totalByteVisible && byteVisibleToRead <= totalByteWritten) {
            readmsg = "pass: reader sees expected number of visible byte on close. " + readmsg2 + " [pass]";
        } else {
            countOfFailures++;
            readmsg = "fail: reader sees different number of visible byte on close. " + readmsg2 + " [fail]";
            LOG.info(readmsg);
            if (abortTestOnFailure) {
                throw new IOException(readmsg);
            }
        }
        // The NameNode's reported length must agree with what the reader saw.
        long lenFromFc = getFileLengthFromNN(path);
        if (lenFromFc != byteVisibleToRead) {
            readmsg = "fail: reader sees different number of visible byte from NN " + readmsg2 + " [fail]";
            throw new IOException(readmsg);
        }
    } catch (IOException e) {
        // Message fixed: this method is testWriteAndRead, not
        // testAppendWriteAndRead.
        throw new IOException("##### Caught Exception in testWriteAndRead. Close file. " + "Total Byte Read so far = " + byteVisibleToRead, e);
    } finally {
        if (out != null)
            out.close();
    }
    return -countOfFailures;
}
293603.544118hadoop
/**
 * Exercises {@code dfsadmin -report} against a MiniDFSCluster through several
 * state transitions: a healthy cluster, after creating a replicated file and
 * a striped (erasure-coded) file, after shutting down one DataNode, after
 * corrupting all replicas of the replicated block, and after marking a block
 * of the striped group corrupt. After each transition the reported node and
 * corrupt-block counts are checked via verifyNodesAndCorruptBlocks, and
 * finally -report is run with every optional argument appended.
 */
public void testReportCommand() throws Exception {
    tearDown();
    redirectStream();
    final Configuration dfsConf = new HdfsConfiguration();
    ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
    // Short heartbeat/recheck intervals so a killed DataNode is declared dead
    // quickly within the test.
    dfsConf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    dfsConf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    final Path baseDir = new Path(PathUtils.getTestDir(getClass()).getAbsolutePath(), GenericTestUtils.getMethodName());
    dfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.toString());
    // Enough DataNodes to host one full XOR-2-1 block group.
    final int numDn = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
    try (MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(numDn).build()) {
        miniCluster.waitActive();
        assertEquals(numDn, miniCluster.getDataNodes().size());
        final DFSAdmin dfsAdmin = new DFSAdmin(dfsConf);
        final DFSClient client = miniCluster.getFileSystem().getClient();
        // Baseline: all nodes live, nothing corrupt.
        resetStream();
        assertEquals(0, ToolRunner.run(dfsAdmin, new String[] { "-report" }));
        verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client, 0L, 0L);
        final short replFactor = 1;
        final long fileLength = 512L;
        final DistributedFileSystem fs = miniCluster.getFileSystem();
        final Path file = new Path(baseDir, "/corrupted");
        fs.enableErasureCodingPolicy(ecPolicy.getName());
        // Replicated file whose single replica will later be corrupted.
        DFSTestUtil.createFile(fs, file, fileLength, replFactor, 12345L);
        DFSTestUtil.waitReplication(fs, file, replFactor);
        final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
        LocatedBlocks lbs = miniCluster.getFileSystem().getClient().getNamenode().getBlockLocations(file.toString(), 0, fileLength);
        assertTrue("Unexpected block type: " + lbs.get(0), lbs.get(0) instanceof LocatedBlock);
        LocatedBlock locatedBlock = lbs.get(0);
        DatanodeInfo locatedDataNode = locatedBlock.getLocations()[0];
        LOG.info("Replica block located on: " + locatedDataNode);
        // Striped (EC) file with a single block group.
        Path ecDir = new Path(baseDir, "ec");
        fs.mkdirs(ecDir);
        fs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        Path ecFile = new Path(ecDir, "ec-file");
        int stripesPerBlock = 2;
        int cellSize = ecPolicy.getCellSize();
        int blockSize = stripesPerBlock * cellSize;
        int blockGroupSize = ecPolicy.getNumDataUnits() * blockSize;
        int totalBlockGroups = 1;
        DFSTestUtil.createStripedFile(miniCluster, ecFile, ecDir, totalBlockGroups, stripesPerBlock, false, ecPolicy);
        // Report still clean after adding both files.
        resetStream();
        assertEquals(0, ToolRunner.run(dfsAdmin, new String[] { "-report" }));
        verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client, 0L, 0L);
        // Shut down a DataNode that does NOT hold the replicated block, so
        // the block stays readable for the corruption step below.
        final List<DataNode> datanodes = miniCluster.getDataNodes();
        DataNode dataNodeToShutdown = null;
        for (DataNode dn : datanodes) {
            if (!dn.getDatanodeId().getDatanodeUuid().equals(locatedDataNode.getDatanodeUuid())) {
                dataNodeToShutdown = dn;
                break;
            }
        }
        assertTrue("Unable to choose a DataNode to shutdown!", dataNodeToShutdown != null);
        LOG.info("Shutting down: " + dataNodeToShutdown);
        dataNodeToShutdown.shutdown();
        miniCluster.setDataNodeDead(dataNodeToShutdown.getDatanodeId());
        // One node now dead; one EC block group is missing an internal block.
        assertEquals(0, ToolRunner.run(dfsAdmin, new String[] { "-report" }));
        verifyNodesAndCorruptBlocks(numDn, numDn - 1, 0, 0, client, 0L, 1L);
        // Corrupt every replica of the replicated block on disk.
        final int blockFilesCorrupted = miniCluster.corruptBlockOnDataNodes(block);
        assertEquals("Fail to corrupt all replicas for block " + block, replFactor, blockFilesCorrupted);
        try {
            // NOTE(review): reads via the outer 'conf' field rather than
            // dfsConf — confirm this is intended.
            IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf, true);
            fail("Should have failed to read the file with corrupted blocks.");
        } catch (ChecksumException ignored) {
            // Expected: all replicas are corrupt.
        }
        // Bumping replication forces the NameNode to notice the corruption.
        fs.setReplication(file, (short) (replFactor + 1));
        BlockManagerTestUtil.updateState(miniCluster.getNameNode().getNamesystem().getBlockManager());
        waitForCorruptBlock(miniCluster, client, file);
        resetStream();
        assertEquals(0, ToolRunner.run(dfsAdmin, new String[] { "-report" }));
        verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 0, client, 0L, 1L);
        // Mark one internal block of the striped group corrupt via the
        // BlockManager directly (requires the namesystem write lock).
        lbs = miniCluster.getFileSystem().getClient().getNamenode().getBlockLocations(ecFile.toString(), 0, blockGroupSize);
        assertTrue("Unexpected block type: " + lbs.get(0), lbs.get(0) instanceof LocatedStripedBlock);
        LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
        miniCluster.getNamesystem().writeLock();
        try {
            BlockManager bm = miniCluster.getNamesystem().getBlockManager();
            bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0], "STORAGE_ID", "TEST");
            BlockManagerTestUtil.updateState(bm);
        } finally {
            miniCluster.getNamesystem().writeUnlock();
        }
        waitForCorruptBlock(miniCluster, client, file);
        resetStream();
        assertEquals(0, ToolRunner.run(dfsAdmin, new String[] { "-report" }));
        verifyNodesAndCorruptBlocks(numDn, numDn - 1, 1, 1, client, 0L, 0L);
        // Finally, -report must accept all of its optional arguments at once.
        resetStream();
        String[] reportWithArg = new String[DFSAdmin.DFS_REPORT_ARGS.length + 1];
        reportWithArg[0] = "-report";
        System.arraycopy(DFSAdmin.DFS_REPORT_ARGS, 0, reportWithArg, 1, DFSAdmin.DFS_REPORT_ARGS.length);
        assertEquals(0, ToolRunner.run(dfsAdmin, reportWithArg));
    }
}
293431.182121hadoop
/**
 * Exercises WebHDFS operations over raw HTTP connections and checks the
 * response code of each: successful GETHOMEDIRECTORY, forbidden proxy-user
 * access, malformed/parameterless PUTs, a missing file, a redirect with its
 * namenode-address parameter stripped, content-type mismatch on OPEN, and a
 * CREATE against a path containing spaces.
 */
public void testResponseCode() throws IOException {
    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) fs;
    final Path root = new Path("/");
    final Path dir = new Path("/test/testUrl");
    assertTrue(webhdfs.mkdirs(dir));
    final Path file = new Path("/test/file");
    final FSDataOutputStream out = webhdfs.create(file);
    out.write(1);
    out.close();
    {
        // GETHOMEDIRECTORY: 200 OK, and the JSON "Path" entry matches the
        // client-side home directory.
        final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root);
        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        assertEquals(WebHdfsTestUtil.sendRequest(conn), HttpServletResponse.SC_OK);
        final Map<?, ?> m = WebHdfsTestUtil.getAndParseResponse(conn);
        assertEquals(webhdfs.getHomeDirectory().toUri().getPath(), m.get(Path.class.getSimpleName()));
        conn.disconnect();
    }
    {
        // Unauthorized doAs proxy user: 403 Forbidden.
        final URL url = webhdfs.toUrl(GetOpParam.Op.GETHOMEDIRECTORY, root, new DoAsParam(ugi.getShortUserName() + "proxy"));
        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.connect();
        assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
        conn.disconnect();
    }
    {
        // SETOWNER without owner/group parameters (and wrong HTTP method):
        // 400 Bad Request.
        final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir);
        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.connect();
        assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
        conn.disconnect();
    }
    {
        // SETREPLICATION with the proper method succeeds; setReplication on
        // a directory returns false through the FileSystem API.
        final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
        final URL url = webhdfs.toUrl(op, dir);
        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod(op.getType().toString());
        conn.connect();
        assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
        assertFalse(webhdfs.setReplication(dir, (short) 1));
        conn.disconnect();
    }
    {
        // GETFILESTATUS on a nonexistent path: 404 Not Found.
        final Path p = new Path(dir, "non-exist");
        final URL url = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, p);
        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.connect();
        assertEquals(HttpServletResponse.SC_NOT_FOUND, conn.getResponseCode());
        conn.disconnect();
    }
    {
        // SETPERMISSION with no permission parameter: 200 OK, empty body,
        // and the directory keeps the default 0755 permission.
        final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
        final URL url = webhdfs.toUrl(op, dir);
        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod(op.getType().toString());
        conn.connect();
        assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
        assertEquals(0, conn.getContentLength());
        assertEquals(MediaType.APPLICATION_OCTET_STREAM, conn.getContentType());
        assertEquals((short) 0755, webhdfs.getFileStatus(dir).getPermission().toShort());
        conn.disconnect();
    }
    {
        // Append path exercised via the shared test helper.
        AppendTestUtil.testAppend(fs, new Path(dir, "append"));
    }
    {
        // CREATE returns a redirect to a DataNode; strip the namenode-address
        // parameter from the redirect URL and expect 400 Bad Request.
        final HttpOpParam.Op op = PutOpParam.Op.CREATE;
        final URL url = webhdfs.toUrl(op, dir);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod(op.getType().toString());
        conn.setDoOutput(false);
        conn.setInstanceFollowRedirects(false);
        conn.connect();
        final String redirect = conn.getHeaderField("Location");
        conn.disconnect();
        WebHdfsFileSystem.LOG.info("redirect = " + redirect);
        final int i = redirect.indexOf(NamenodeAddressParam.NAME);
        final int j = redirect.indexOf("&", i);
        // i - 1 also removes the separator character preceding the parameter.
        String modified = redirect.substring(0, i - 1) + redirect.substring(j);
        WebHdfsFileSystem.LOG.info("modified = " + modified);
        conn = (HttpURLConnection) new URL(modified).openConnection();
        conn.setRequestMethod(op.getType().toString());
        conn.setDoOutput(op.getDoOutput());
        conn.connect();
        assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
    }
    {
        // OPEN returns file bytes, not JSON, so parsing the response as JSON
        // must throw.
        final HttpOpParam.Op op = GetOpParam.Op.OPEN;
        final URL url = webhdfs.toUrl(op, file);
        final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod(op.getType().toString());
        conn.connect();
        try {
            WebHdfsFileSystem.jsonParse(conn, false);
            fail();
        } catch (IOException ioe) {
            WebHdfsFileSystem.LOG.info("GOOD", ioe);
        }
        conn.disconnect();
    }
    {
        // CREATE on a path containing spaces: follow the 307 redirect
        // manually and expect 201 Created from the DataNode.
        HttpOpParam.Op op = PutOpParam.Op.CREATE;
        Path path = new Path("/test/path with spaces");
        URL url = webhdfs.toUrl(op, path);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod(op.getType().toString());
        conn.setDoOutput(false);
        conn.setInstanceFollowRedirects(false);
        final String redirect;
        try {
            conn.connect();
            assertEquals(HttpServletResponse.SC_TEMPORARY_REDIRECT, conn.getResponseCode());
            redirect = conn.getHeaderField("Location");
        } finally {
            conn.disconnect();
        }
        conn = (HttpURLConnection) new URL(redirect).openConnection();
        conn.setRequestMethod(op.getType().toString());
        conn.setDoOutput(op.getDoOutput());
        try {
            conn.connect();
            assertEquals(HttpServletResponse.SC_CREATED, conn.getResponseCode());
        } finally {
            conn.disconnect();
        }
    }
}
293527.395115hadoop
/**
 * Submits a trivial identity map-only job to a mini cluster and exercises the
 * JobClient/NetworkedJob query surface against it: job identity and progress
 * accessors, task reports (including that setup/cleanup task types are
 * rejected), cluster status plus its Writable round-trip, queue information,
 * and ACLs for the current user.
 */
public void testNetworkedJob() throws Exception {
    MiniMRClientCluster mr = null;
    FileSystem fileSys = null;
    try {
        mr = createMiniClusterWithCapacityScheduler();
        JobConf job = new JobConf(mr.getConfig());
        fileSys = FileSystem.get(job);
        fileSys.delete(testDir, true);
        FSDataOutputStream out = fileSys.create(inFile, true);
        out.writeBytes("This is a test file");
        out.close();
        FileInputFormat.setInputPaths(job, inFile);
        FileOutputFormat.setOutputPath(job, outDir);
        job.setInputFormat(TextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setMapperClass(IdentityMapper.class);
        job.setReducerClass(IdentityReducer.class);
        job.setNumReduceTasks(0);
        JobClient client = new JobClient(mr.getConfig());
        RunningJob rj = client.submitJob(job);
        JobID jobId = rj.getID();
        // NetworkedJob accessors before the job has made any progress.
        NetworkedJob runningJob = (NetworkedJob) client.getJob(jobId);
        runningJob.setJobPriority(JobPriority.HIGH.name());
        assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml"));
        assertEquals(jobId, runningJob.getID());
        assertEquals(jobId.toString(), runningJob.getJobID());
        assertEquals("N/A", runningJob.getJobName());
        assertTrue(runningJob.getJobFile().endsWith(".staging/" + runningJob.getJobID() + "/job.xml"));
        assertTrue(runningJob.getTrackingURL().length() > 0);
        assertThat(runningJob.mapProgress()).isEqualTo(0.0f);
        assertThat(runningJob.reduceProgress()).isEqualTo(0.0f);
        assertThat(runningJob.cleanupProgress()).isEqualTo(0.0f);
        assertThat(runningJob.setupProgress()).isEqualTo(0.0f);
        TaskCompletionEvent[] tce = runningJob.getTaskCompletionEvents(0);
        // Expected value goes first in assertEquals (was swapped).
        assertEquals(0, tce.length);
        assertEquals("", runningJob.getHistoryUrl());
        assertFalse(runningJob.isRetired());
        assertEquals("", runningJob.getFailureInfo());
        assertEquals("N/A", runningJob.getJobStatus().getJobName());
        assertEquals(0, client.getMapTaskReports(jobId).length);
        // JOB_SETUP and JOB_CLEANUP are not valid task types for reports;
        // fail fast if no exception is thrown (previously these passed
        // vacuously when the call succeeded).
        try {
            client.getSetupTaskReports(jobId);
            throw new AssertionError("Expected YarnRuntimeException for JOB_SETUP");
        } catch (YarnRuntimeException e) {
            assertEquals("Unrecognized task type: JOB_SETUP", e.getMessage());
        }
        try {
            client.getCleanupTaskReports(jobId);
            throw new AssertionError("Expected YarnRuntimeException for JOB_CLEANUP");
        } catch (YarnRuntimeException e) {
            assertEquals("Unrecognized task type: JOB_CLEANUP", e.getMessage());
        }
        assertEquals(0, client.getReduceTaskReports(jobId).length);
        // Cluster status snapshot for the two-node mini cluster.
        ClusterStatus status = client.getClusterStatus(true);
        assertEquals(2, status.getActiveTrackerNames().size());
        assertEquals(0, status.getBlacklistedTrackers());
        assertEquals(0, status.getBlacklistedTrackerNames().size());
        assertEquals(0, status.getBlackListedTrackersInfo().size());
        assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
        assertEquals(1, status.getMapTasks());
        assertEquals(20, status.getMaxMapTasks());
        assertEquals(4, status.getMaxReduceTasks());
        assertEquals(0, status.getNumExcludedNodes());
        assertEquals(1, status.getReduceTasks());
        assertEquals(2, status.getTaskTrackers());
        assertEquals(0, status.getTTExpiryInterval());
        assertEquals(JobTrackerStatus.RUNNING, status.getJobTrackerStatus());
        assertEquals(0, status.getGraylistedTrackers());
        // ClusterStatus must survive a Writable serialization round-trip.
        ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
        status.write(new DataOutputStream(dataOut));
        ClusterStatus status2 = new ClusterStatus();
        status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut.toByteArray())));
        assertEquals(status.getActiveTrackerNames(), status2.getActiveTrackerNames());
        assertEquals(status.getBlackListedTrackersInfo(), status2.getBlackListedTrackersInfo());
        assertEquals(status.getMapTasks(), status2.getMapTasks());
        JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
        assertEquals(TaskStatusFilter.ALL, JobClient.getTaskOutputFilter(job));
        assertEquals(20, client.getDefaultMaps());
        assertEquals(4, client.getDefaultReduces());
        assertEquals("jobSubmitDir", client.getSystemDir().getName());
        // Queue topology: a single "default" queue under root.
        JobQueueInfo[] rootQueueInfo = client.getRootQueues();
        assertEquals(1, rootQueueInfo.length);
        assertEquals("default", rootQueueInfo[0].getQueueName());
        JobQueueInfo[] qinfo = client.getQueues();
        assertEquals(1, qinfo.length);
        assertEquals("default", qinfo[0].getQueueName());
        assertEquals(0, client.getChildQueues("default").length);
        assertEquals(1, client.getJobsFromQueue("default").length);
        assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml"));
        JobQueueInfo qi = client.getQueueInfo("default");
        assertEquals("default", qi.getQueueName());
        assertEquals("running", qi.getQueueState());
        QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser();
        assertEquals(2, aai.length);
        assertEquals("root", aai[0].getQueueName());
        assertEquals("root.default", aai[1].getQueueName());
        assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId).getJobStatus().getJobID());
        assertEquals("Expected matching startTimes", rj.getJobStatus().getStartTime(), client.getJob(jobId).getJobStatus().getStartTime());
    } finally {
        if (fileSys != null) {
            fileSys.delete(testDir, true);
        }
        if (mr != null) {
            mr.stop();
        }
    }
}
292340.1612115hadoop
 /**
  * Parses the command line that controls the framework-tarball upload:
  * input classpath, jar whitelist/blacklist regexes, target filesystem and
  * path, replication counts, replication timeout, and symlink handling.
  * Populates the corresponding instance fields.
  *
  * @param args raw command line arguments
  * @return true if parsing succeeded and the upload should proceed; false if
  *         help was requested or the arguments were invalid (help printed)
  * @throws IOException if the default filesystem cannot be resolved
  */
 boolean parseArguments(String[] args) throws IOException {
    Options options = new Options();
    options.addOption(Option.builder("h").build());
    options.addOption(Option.builder("help").build());
    options.addOption(Option.builder("input").desc("Input class path. Defaults to the default classpath.").hasArg().build());
    options.addOption(Option.builder("whitelist").desc("Regex specifying the full path of jars to include in the" + " framework tarball. Default is a hardcoded set of jars" + " considered necessary to include").hasArg().build());
    options.addOption(Option.builder("blacklist").desc("Regex specifying the full path of jars to exclude in the" + " framework tarball. Default is a hardcoded set of jars" + " considered unnecessary to include").hasArg().build());
    options.addOption(Option.builder("fs").desc("Target file system to upload to." + " Example: hdfs://foo.com:8020").hasArg().build());
    options.addOption(Option.builder("target").desc("Target file to upload to with a reference name." + " Example: /usr/mr-framework.tar.gz#mr-framework").hasArg().build());
    options.addOption(Option.builder("initialReplication").desc("Desired initial replication count. Default 3.").hasArg().build());
    options.addOption(Option.builder("finalReplication").desc("Desired final replication count. Default 10.").hasArg().build());
    options.addOption(Option.builder("acceptableReplication").desc("Desired acceptable replication count. Default 9.").hasArg().build());
    options.addOption(Option.builder("timeout").desc("Desired timeout for the acceptable" + " replication in seconds. Default 10").hasArg().build());
    options.addOption(Option.builder("nosymlink").desc("Ignore symlinks into the same directory").build());
    GenericOptionsParser cliParser = new GenericOptionsParser(options, args);
    // Help short-circuits all other processing.
    if (cliParser.getCommandLine().hasOption("help") || cliParser.getCommandLine().hasOption("h")) {
        printHelp(options);
        return false;
    }
    input = cliParser.getCommandLine().getOptionValue("input", System.getProperty("java.class.path"));
    whitelist = cliParser.getCommandLine().getOptionValue("whitelist", DefaultJars.DEFAULT_MR_JARS);
    blacklist = cliParser.getCommandLine().getOptionValue("blacklist", DefaultJars.DEFAULT_EXCLUDED_MR_JARS);
    initialReplication = Short.parseShort(cliParser.getCommandLine().getOptionValue("initialReplication", "3"));
    finalReplication = Short.parseShort(cliParser.getCommandLine().getOptionValue("finalReplication", "10"));
    acceptableReplication = Short.parseShort(cliParser.getCommandLine().getOptionValue("acceptableReplication", "9"));
    timeout = Integer.parseInt(cliParser.getCommandLine().getOptionValue("timeout", "10"));
    if (cliParser.getCommandLine().hasOption("nosymlink")) {
        ignoreSymlink = true;
    }
    String targetFs = cliParser.getCommandLine().getOptionValue("fs", null);
    String targetPath = cliParser.getCommandLine().getOptionValue("target", "/usr/lib/mr-framework.tar.gz#mr-framework");
    // A target that already carries a scheme needs no filesystem prefix.
    boolean hasScheme = targetPath.startsWith("hdfs://") || targetPath.startsWith("file://");
    if (targetFs == null) {
        targetFs = conf.getTrimmed(FS_DEFAULT_NAME_KEY);
        if (targetFs == null && !hasScheme) {
            LOG.error("No filesystem specified in either fs or target.");
            printHelp(options);
            return false;
        }
        LOG.info(String.format("Target file system not specified. Using default %s", targetFs));
    }
    if (targetPath.isEmpty()) {
        LOG.error("Target directory not specified");
        printHelp(options);
        return false;
    }
    // Prefix the filesystem (plus a separating slash when the path is
    // relative) unless the target already has a scheme.
    target = hasScheme ? targetPath : targetFs + (targetPath.startsWith("/") ? "" : "/") + targetPath;
    if (cliParser.getRemainingArgs().length > 0) {
        LOG.warn("Unexpected parameters");
        printHelp(options);
        return false;
    }
    return true;
}
292541.119108hadoop
/**
 * Drives a single ABFS client operation against {@code testPath} with the
 * given encryption context provider installed on the filesystem's client,
 * and returns the resulting AbfsRestOperation (or null for the paths that
 * assert inline instead).
 *
 * Behavior depends on instance state set elsewhere in this test class:
 * when {@code isExceptionCase} is set, the operation is executed through the
 * FileSystem API and is expected to throw an IOException; otherwise the
 * operation is issued directly through the AbfsClient, with a
 * ContextProviderEncryptionAdapter built when the file uses
 * ENCRYPTION_CONTEXT encryption.
 */
private AbfsRestOperation callOperation(AzureBlobFileSystem fs, Path testPath, EncryptionContextProvider ecp) throws Exception {
    AbfsClient client = fs.getAbfsClient();
    AbfsClientUtils.setEncryptionContextProvider(client, ecp);
    if (isExceptionCase) {
        // Exception path: every supported operation must fail with an
        // IOException when run through the FileSystem API.
        LambdaTestUtils.intercept(IOException.class, () -> {
            switch(operation) {
                case WRITE:
                    try (FSDataOutputStream out = fs.append(testPath)) {
                        out.write("bytes".getBytes());
                    }
                    break;
                case READ:
                    try (FSDataInputStream in = fs.open(testPath)) {
                        in.read(new byte[5]);
                    }
                    break;
                case SET_ATTR:
                    fs.setXAttr(testPath, "attribute", "value".getBytes());
                    break;
                case GET_ATTR:
                    fs.getXAttr(testPath, "attribute");
                    break;
                default:
                    throw new NoSuchFieldException();
            }
        });
        return null;
    } else {
        ContextProviderEncryptionAdapter encryptionAdapter = null;
        if (fileEncryptionType == ENCRYPTION_CONTEXT) {
            // Build the adapter from the mock provider's recorded context for
            // this path, base64-encoded as the service would supply it.
            encryptionAdapter = new ContextProviderEncryptionAdapter(ecp, fs.getAbfsStore().getRelativePath(testPath), Base64.getEncoder().encode(((MockEncryptionContextProvider) ecp).getEncryptionContextForTest(testPath.toString()).getBytes(StandardCharsets.UTF_8)));
        }
        String path = testPath.toString();
        switch(operation) {
            case READ:
                if (!fileSystemListStatusResultToBeUsedForOpeningFile || fileEncryptionType != ENCRYPTION_CONTEXT) {
                    // Direct client read: fetch the ETag first, then read the
                    // first five bytes.
                    TracingContext tracingContext = getTestTracingContext(fs, true);
                    AbfsHttpOperation statusOp = client.getPathStatus(path, false, tracingContext, null).getResult();
                    return client.read(path, 0, new byte[5], 0, 5, statusOp.getResponseHeader(HttpHeaderConfigurations.ETAG), null, encryptionAdapter, tracingContext);
                } else {
                    // Open via the FileStatus returned by listStatus, which
                    // must carry the encryption context; asserts inline and
                    // returns null instead of a rest operation.
                    FileStatus status = fs.listStatus(testPath)[0];
                    Assertions.assertThat(status).isInstanceOf(AzureBlobFileSystemStore.VersionedFileStatus.class);
                    Assertions.assertThat(((AzureBlobFileSystemStore.VersionedFileStatus) status).getEncryptionContext()).isNotNull();
                    try (FSDataInputStream in = fs.openFileWithOptions(testPath, new OpenFileParameters().withMandatoryKeys(new HashSet<>()).withStatus(fs.listStatus(testPath)[0])).get()) {
                        byte[] readBuffer = new byte[3];
                        Assertions.assertThat(in.read(readBuffer)).isGreaterThan(0);
                        Assertions.assertThat(readBuffer).isEqualTo(SERVER_FILE_CONTENT.getBytes());
                        return null;
                    }
                }
            case WRITE:
                return client.flush(path, 3, false, false, null, null, encryptionAdapter, getTestTracingContext(fs, false));
            case APPEND:
                return client.append(path, "val".getBytes(), new AppendRequestParameters(3, 0, 3, APPEND_MODE, false, null, true), null, encryptionAdapter, getTestTracingContext(fs, false));
            case SET_ACL:
                return client.setAcl(path, AclEntry.aclSpecToString(Lists.newArrayList(aclEntry(ACCESS, USER, ALL))), getTestTracingContext(fs, false));
            case LISTSTATUS:
                return client.listPath(path, false, 5, null, getTestTracingContext(fs, true));
            case RENAME:
                TracingContext tc = getTestTracingContext(fs, true);
                return client.renamePath(path, new Path(path + "_2").toString(), null, tc, null, false, fs.getIsNamespaceEnabled(tc)).getOp();
            case DELETE:
                TracingContext testTC = getTestTracingContext(fs, false);
                return client.deletePath(path, false, null, testTC, fs.getIsNamespaceEnabled(testTC));
            case GET_ATTR:
                return client.getPathStatus(path, true, getTestTracingContext(fs, false), createEncryptionAdapterFromServerStoreContext(path, getTestTracingContext(fs, false), client));
            case SET_ATTR:
                Hashtable<String, String> properties = new Hashtable<>();
                properties.put("key", "{ value: valueTest }");
                return client.setPathProperties(path, fs.getAbfsStore().convertXmsPropertiesToCommaSeparatedString(properties), getTestTracingContext(fs, false), createEncryptionAdapterFromServerStoreContext(path, getTestTracingContext(fs, false), client));
            case SET_PERMISSION:
                return client.setPermission(path, FsPermission.getDefault().toString(), getTestTracingContext(fs, false));
            default:
                throw new NoSuchFieldException();
        }
    }
}
292948.2315103hadoop
public boolean init(String[] args) throws ParseException, IOException {
    // Parses command-line arguments and prepares this client for launching the
    // Dynamometer application.
    //
    // @param args raw command-line arguments.
    // @return false (after printing usage) if help was requested; true once
    //         initialization completed successfully.
    // @throws ParseException if the options cannot be parsed.
    // @throws IOException on filesystem access failures.
    List<String> list = Arrays.asList(args);
    if (list.contains("-h") || list.contains("--help")) {
        printUsage();
        return false;
    }
    CommandLineParser parser = new GnuParser();
    CommandLine commandLine = parser.parse(opts, args);
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(getConf());
    LOG.info("Starting with arguments: [\"{}\"]", Joiner.on("\" \"").join(args));
    Path fsImageDir = new Path(commandLine.getOptionValue(FS_IMAGE_DIR_ARG, ""));
    versionFilePath = new Path(fsImageDir, "VERSION").toString();
    if (commandLine.hasOption(NAMENODE_SERVICERPC_ADDR_ARG)) {
        // Attach to an externally managed NameNode instead of launching one.
        launchNameNode = false;
        remoteNameNodeRpcAddress = commandLine.getOptionValue(NAMENODE_SERVICERPC_ADDR_ARG);
    } else {
        launchNameNode = true;
        FileSystem localFS = FileSystem.getLocal(getConf());
        fsImageDir = fsImageDir.makeQualified(localFS.getUri(), localFS.getWorkingDirectory());
        FileSystem fsImageFS = fsImageDir.getFileSystem(getConf());
        // Exactly one fsimage_<txid> file must be present to seed the NameNode.
        FileStatus[] fsImageFiles = fsImageFS.listStatus(fsImageDir, (path) -> path.getName().matches("^fsimage_(\\d)+$"));
        if (fsImageFiles.length != 1) {
            throw new IllegalArgumentException("Must be exactly one fsimage file present in fs_image_dir");
        }
        fsImagePath = fsImageFiles[0].getPath().toString();
        fsImageMD5Path = fsImageFiles[0].getPath().suffix(".md5").toString();
    }
    this.appName = commandLine.getOptionValue(APPNAME_ARG, APPNAME_DEFAULT);
    this.amQueue = commandLine.getOptionValue(QUEUE_ARG, QUEUE_DEFAULT);
    this.amMemory = Integer.parseInt(commandLine.getOptionValue(MASTER_MEMORY_MB_ARG, MASTER_MEMORY_MB_DEFAULT));
    this.amVCores = Integer.parseInt(commandLine.getOptionValue(MASTER_VCORES_ARG, MASTER_VCORES_DEFAULT));
    // BUG FIX: these validations previously ran *before* amMemory/amVCores were
    // parsed from the command line above, so they checked stale field values
    // and could never reject an invalid user-supplied value.
    if (amMemory < 0) {
        throw new IllegalArgumentException("Invalid memory specified for " + "application master, exiting. Specified memory=" + amMemory);
    }
    if (amVCores < 0) {
        throw new IllegalArgumentException("Invalid virtual cores specified for " + "application master, exiting. Specified virtual cores=" + amVCores);
    }
    this.confPath = commandLine.getOptionValue(CONF_PATH_ARG);
    this.blockListPath = commandLine.getOptionValue(BLOCK_LIST_PATH_ARG);
    if (commandLine.hasOption(HADOOP_BINARY_PATH_ARG)) {
        this.hadoopBinary = commandLine.getOptionValue(HADOOP_BINARY_PATH_ARG);
    } else {
        // No binary supplied: download a tarball for the requested version.
        this.hadoopBinary = DynoInfraUtils.fetchHadoopTarball(new File(".").getAbsoluteFile(), commandLine.getOptionValue(HADOOP_VERSION_ARG), getConf(), LOG).toString();
    }
    this.amOptions = AMOptions.initFromParser(commandLine);
    this.clientTimeout = Integer.parseInt(commandLine.getOptionValue(TIMEOUT_ARG, TIMEOUT_DEFAULT));
    this.tokenFileLocation = commandLine.getOptionValue(TOKEN_FILE_LOCATION_ARG);
    amOptions.verify();
    // The block listing files must already exist on a non-local filesystem.
    Path blockPath = new Path(blockListPath);
    FileSystem blockListFS = blockPath.getFileSystem(getConf());
    if (blockListFS.getUri().equals(FileSystem.getLocal(getConf()).getUri()) || !blockListFS.exists(blockPath)) {
        throw new IllegalArgumentException("block list path must already exist on remote fs!");
    }
    numTotalDataNodes = blockListFS.listStatus(blockPath, DynoConstants.BLOCK_LIST_FILE_FILTER).length;
    if (commandLine.hasOption(WORKLOAD_REPLAY_ENABLE_ARG)) {
        if (!commandLine.hasOption(WORKLOAD_INPUT_PATH_ARG) || !commandLine.hasOption(WORKLOAD_START_DELAY_ARG)) {
            throw new IllegalArgumentException("workload_replay_enable was " + "specified; must include all required workload_ parameters.");
        }
        launchWorkloadJob = true;
        workloadInputPath = commandLine.getOptionValue(WORKLOAD_INPUT_PATH_ARG);
        workloadOutputPath = commandLine.getOptionValue(WORKLOAD_OUTPUT_PATH_ARG);
        workloadThreadsPerMapper = Integer.parseInt(commandLine.getOptionValue(WORKLOAD_THREADS_PER_MAPPER_ARG, String.valueOf(AuditReplayMapper.NUM_THREADS_DEFAULT)));
        workloadRateFactor = Double.parseDouble(commandLine.getOptionValue(WORKLOAD_RATE_FACTOR_ARG, WORKLOAD_RATE_FACTOR_DEFAULT));
        workloadExtraConfigs = new HashMap<>();
        // Each workload config option is a key=value pair.
        if (commandLine.getOptionValues(WORKLOAD_CONFIG_ARG) != null) {
            for (String opt : commandLine.getOptionValues(WORKLOAD_CONFIG_ARG)) {
                Iterator<String> kvPair = Splitter.on("=").trimResults().split(opt).iterator();
                workloadExtraConfigs.put(kvPair.next(), kvPair.next());
            }
        }
        // Round-trip the delay through a throwaway Configuration key to reuse
        // its human-readable time-duration parsing (e.g. "30s", "5m").
        String delayString = commandLine.getOptionValue(WORKLOAD_START_DELAY_ARG, WorkloadDriver.START_TIME_OFFSET_DEFAULT);
        getConf().set("___temp___", delayString);
        workloadStartDelayMs = getConf().getTimeDuration("___temp___", 0, TimeUnit.MILLISECONDS);
    }
    return true;
}
292881.162096hadoop
private void setupRemoteResource(ApplicationId appId, DynoResource resource, Map<String, String> env, String... srcPaths) throws IOException {
    // Uploads (or references in place) the files backing {@code resource} so
    // they can be localized by YARN, then records the resource's final
    // location, timestamp, and length into {@code env}.
    //
    // @param appId the application the resource belongs to (used to derive the
    //        remote staging directory).
    // @param resource descriptor of the resource being staged.
    // @param env environment map to populate with location/timestamp/length.
    // @param srcPaths one or more source paths; multiple paths are only
    //        allowed for ARCHIVE-type resources.
    // @throws IOException on upload or filesystem access failure.
    FileStatus remoteFileStatus;
    Path dstPath;
    Preconditions.checkArgument(srcPaths.length > 0, "Must supply at least one source path");
    Preconditions.checkArgument(resource.getType() == LocalResourceType.ARCHIVE || srcPaths.length == 1, "Can only specify multiple source paths if using an ARCHIVE type");
    List<URI> srcURIs = Arrays.stream(srcPaths).map(URI::create).collect(Collectors.toList());
    Set<String> srcSchemes = srcURIs.stream().map(URI::getScheme).collect(Collectors.toSet());
    Preconditions.checkArgument(srcSchemes.size() == 1, "All source paths must have the same scheme");
    String srcScheme = srcSchemes.iterator().next();
    String srcPathString = "[" + Joiner.on(",").join(srcPaths) + "]";
    if (srcScheme == null || srcScheme.equals(FileSystem.getLocal(getConf()).getScheme()) || srcScheme.equals("jar")) {
        // Source is local (or inside a JAR): the bytes must be uploaded.
        List<File> srcFiles = srcURIs.stream().map(URI::getSchemeSpecificPart).map(File::new).collect(Collectors.toList());
        Path dstPathBase = getRemoteStoragePath(getConf(), appId);
        // Archive when there are multiple files, a directory, or an ARCHIVE
        // resource whose single file is not already a recognized archive type.
        boolean shouldArchive = srcFiles.size() > 1 || srcFiles.get(0).isDirectory() || (resource.getType() == LocalResourceType.ARCHIVE && Arrays.stream(ARCHIVE_FILE_TYPES).noneMatch(suffix -> srcFiles.get(0).getName().endsWith(suffix)));
        if (shouldArchive) {
            if ("jar".equals(srcScheme)) {
                throw new IllegalArgumentException(String.format("Resources in JARs " + "can't be zipped; resource %s is ARCHIVE and src is: %s", resource.getResourcePath(), srcPathString));
            } else if (resource.getType() != LocalResourceType.ARCHIVE) {
                throw new IllegalArgumentException(String.format("Resource type is %s but srcPaths were: %s", resource.getType(), srcPathString));
            }
            dstPath = new Path(dstPathBase, resource.getResourcePath()).suffix(".zip");
        } else {
            dstPath = new Path(dstPathBase, srcFiles.get(0).getName());
        }
        FileSystem remoteFS = dstPath.getFileSystem(getConf());
        LOG.info("Uploading resource " + resource + " from " + srcPathString + " to " + dstPath);
        try (OutputStream outputStream = remoteFS.create(dstPath, true)) {
            if ("jar".equals(srcScheme)) {
                try (InputStream inputStream = new URL(srcPaths[0]).openStream()) {
                    IOUtils.copyBytes(inputStream, outputStream, getConf());
                }
            } else if (shouldArchive) {
                List<File> filesToZip;
                if (srcFiles.size() == 1 && srcFiles.get(0).isDirectory()) {
                    // Archive a directory's children, not the directory itself.
                    File[] childFiles = srcFiles.get(0).listFiles();
                    if (childFiles == null || childFiles.length == 0) {
                        throw new IllegalArgumentException("Specified a directory to archive with no contents");
                    }
                    filesToZip = Lists.newArrayList(childFiles);
                } else {
                    filesToZip = srcFiles;
                }
                // BUG FIX: use try-with-resources so the ZipOutputStream is
                // always finished/closed, even if zipping a file throws;
                // previously an exception skipped zout.close() entirely.
                try (ZipOutputStream zout = new ZipOutputStream(outputStream)) {
                    for (File fileToZip : filesToZip) {
                        addFileToZipRecursively(fileToZip.getParentFile(), fileToZip, zout);
                    }
                }
            } else {
                try (InputStream inputStream = new FileInputStream(srcFiles.get(0))) {
                    IOUtils.copyBytes(inputStream, outputStream, getConf());
                }
            }
        }
        remoteFileStatus = remoteFS.getFileStatus(dstPath);
    } else {
        // Source already lives on a remote filesystem; use it directly.
        if (srcPaths.length > 1) {
            throw new IllegalArgumentException("If resource is on remote, must be " + "a single file: " + srcPathString);
        }
        LOG.info("Using resource {} directly from current location: {}", resource, srcPaths[0]);
        dstPath = new Path(srcPaths[0]);
        remoteFileStatus = FileSystem.get(dstPath.toUri(), getConf()).getFileStatus(dstPath);
        if (remoteFileStatus.isDirectory()) {
            throw new IllegalArgumentException("If resource is on remote " + "filesystem, must be a file: " + srcPaths[0]);
        }
    }
    env.put(resource.getLocationEnvVar(), dstPath.toString());
    env.put(resource.getTimestampEnvVar(), String.valueOf(remoteFileStatus.getModificationTime()));
    env.put(resource.getLengthEnvVar(), String.valueOf(remoteFileStatus.getLen()));
}
293296.89109hadoop
public static void setupClass() throws Exception {
    // One-time environment bootstrap for this integration test: validates
    // prerequisites, obtains and unpacks a Hadoop tarball, starts a
    // MiniYARNCluster and MiniDFSCluster, and labels the NodeManagers.
    // The statement order matters: clusters must be up before their configs
    // are read and before resources are uploaded.
    PlatformAssumptions.assumeNotWindows("Dynamometer will not run on Windows");
    Assume.assumeThat("JAVA_HOME must be set properly", System.getenv("JAVA_HOME"), notNullValue());
    // Skip (rather than fail) the whole suite when `tar` is unavailable.
    try {
        Shell.ShellCommandExecutor tarCheck = new Shell.ShellCommandExecutor(new String[] { "bash", "-c", "command -v tar" });
        tarCheck.execute();
        Assume.assumeTrue("tar command is not available", tarCheck.getExitCode() == 0);
    } catch (IOException ioe) {
        Assume.assumeNoException("Unable to execute a shell command", ioe);
    }
    conf = new Configuration();
    testBaseDir = new File(System.getProperty(PROP_TEST_BUILD_DATA, "build/test/data"));
    String hadoopBinVersion = System.getProperty(HADOOP_BIN_VERSION_KEY, HADOOP_BIN_VERSION_DEFAULT);
    // Use a caller-supplied tarball path if set; otherwise fetch one.
    if (System.getProperty(HADOOP_BIN_PATH_KEY) == null) {
        hadoopTarballPath = fetchHadoopTarball(testBaseDir, hadoopBinVersion, conf, LOG);
    } else {
        hadoopTarballPath = new File(System.getProperty(HADOOP_BIN_PATH_KEY));
    }
    // Remove unpacked directories left behind by previous runs.
    if (testBaseDir.exists()) {
        File[] oldUnpackedDirs = testBaseDir.listFiles((dir, name) -> name.startsWith(HADOOP_BIN_UNPACKED_DIR_PREFIX));
        if (oldUnpackedDirs != null) {
            for (File oldDir : oldUnpackedDirs) {
                FileUtils.deleteQuietly(oldDir);
            }
        }
    }
    // Expand the tarball into a fresh uniquely-named directory.
    hadoopUnpackedDir = new File(testBaseDir, HADOOP_BIN_UNPACKED_DIR_PREFIX + UUID.randomUUID());
    assertTrue("Failed to make temporary directory", hadoopUnpackedDir.mkdirs());
    Shell.ShellCommandExecutor shexec = new Shell.ShellCommandExecutor(new String[] { "tar", "xzf", hadoopTarballPath.getAbsolutePath(), "-C", hadoopUnpackedDir.getAbsolutePath() });
    shexec.execute();
    if (shexec.getExitCode() != 0) {
        fail("Unable to execute tar to expand Hadoop binary");
    }
    // Configure the capacity scheduler: full capacity for root/root.default
    // and make both node labels accessible at 100% capacity on each queue.
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
    for (String q : new String[] { "root", "root.default" }) {
        conf.setInt(CapacitySchedulerConfiguration.PREFIX + q + "." + CapacitySchedulerConfiguration.CAPACITY, 100);
        String accessibleNodeLabelPrefix = CapacitySchedulerConfiguration.PREFIX + q + "." + CapacitySchedulerConfiguration.ACCESSIBLE_NODE_LABELS;
        conf.set(accessibleNodeLabelPrefix, CapacitySchedulerConfiguration.ALL_ACL);
        conf.setInt(accessibleNodeLabelPrefix + "." + DATANODE_NODELABEL + "." + CapacitySchedulerConfiguration.CAPACITY, 100);
        conf.setInt(accessibleNodeLabelPrefix + "." + NAMENODE_NODELABEL + "." + CapacitySchedulerConfiguration.CAPACITY, 100);
    }
    conf.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, DominantResourceCalculator.class, ResourceCalculator.class);
    conf.setBoolean(YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, false);
    // Bring up the mini YARN cluster first, then the mini DFS cluster, and
    // point both configurations' default FS at the DFS cluster's URI.
    miniYARNCluster = new MiniYARNCluster(TestDynamometerInfra.class.getName(), 1, MINICLUSTER_NUM_NMS, 1, 1);
    miniYARNCluster.init(conf);
    miniYARNCluster.start();
    yarnConf = miniYARNCluster.getConfig();
    miniDFSCluster = new MiniDFSCluster.Builder(conf).format(true).numDataNodes(MINICLUSTER_NUM_DNS).build();
    miniDFSCluster.waitClusterUp();
    FileSystem.setDefaultUri(conf, miniDFSCluster.getURI());
    FileSystem.setDefaultUri(yarnConf, miniDFSCluster.getURI());
    fs = miniDFSCluster.getFileSystem();
    // Overwrite the classpath's dummy yarn-site.xml with the live cluster
    // config so launched containers pick up the real addresses.
    URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
    if (url == null) {
        throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
    }
    yarnConf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, new File(url.getPath()).getParent());
    try (ByteArrayOutputStream bytesOut = new ByteArrayOutputStream()) {
        yarnConf.writeXml(bytesOut);
        try (OutputStream fileOut = new FileOutputStream(new File(url.getPath()))) {
            fileOut.write(bytesOut.toByteArray());
        }
    }
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(yarnConf));
    yarnClient.start();
    // Stage fsimage/version/block/audit/conf resource paths on HDFS.
    fsImageTmpPath = fs.makeQualified(new Path("/tmp/" + FSIMAGE_FILENAME));
    fsVersionTmpPath = fs.makeQualified(new Path("/tmp/" + VERSION_FILENAME));
    blockImageOutputDir = fs.makeQualified(new Path("/tmp/blocks"));
    auditTraceDir = fs.makeQualified(new Path("/tmp/audit_trace_direct"));
    confZip = fs.makeQualified(new Path("/tmp/conf.zip"));
    uploadFsimageResourcesToHDFS(hadoopBinVersion);
    miniYARNCluster.waitForNodeManagersToConnect(30000);
    // Label NM 0 as the NameNode host and NM 1 as the DataNode host.
    // NOTE(review): indexing NM 1 assumes MINICLUSTER_NUM_NMS >= 2 — confirm.
    RMNodeLabelsManager nodeLabelManager = miniYARNCluster.getResourceManager().getRMContext().getNodeLabelManager();
    nodeLabelManager.addToCluserNodeLabelsWithDefaultExclusivity(Sets.newHashSet(NAMENODE_NODELABEL, DATANODE_NODELABEL));
    Map<NodeId, Set<String>> nodeLabels = new HashMap<>();
    nodeLabels.put(miniYARNCluster.getNodeManager(0).getNMContext().getNodeId(), Sets.newHashSet(NAMENODE_NODELABEL));
    nodeLabels.put(miniYARNCluster.getNodeManager(1).getNMContext().getNodeId(), Sets.newHashSet(DATANODE_NODELABEL));
    nodeLabelManager.addLabelsToNode(nodeLabels);
}
292248.021136hadoop
public void testHelp() throws Exception {
    // Verifies the rmadmin CLI help output: captures stdout/stderr, runs
    // "-help" against both the plain and HA-enabled CLI instances, and checks
    // that the expected usage text and per-command help messages appear.
    PrintStream oldOutPrintStream = System.out;
    PrintStream oldErrPrintStream = System.err;
    ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
    ByteArrayOutputStream dataErr = new ByteArrayOutputStream();
    System.setOut(new PrintStream(dataOut));
    System.setErr(new PrintStream(dataErr));
    try {
        String[] args = { "-help" };
        assertEquals(0, rmAdminCLI.run(args));
        // Echo captured output to the real stdout for debugging.
        oldOutPrintStream.println(dataOut);
        // Overall usage banner and option summary, then one-line descriptions.
        assertTrue(dataOut.toString().contains("rmadmin is the command to execute YARN administrative commands."));
        assertTrue(dataOut.toString().contains("yarn rmadmin [-refreshQueues] [-refreshNodes " + "[-g|graceful [timeout in seconds] -client|server]] " + "[-refreshNodesResources] [-refresh" + "SuperUserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup " + "[username]] [-addToClusterNodeLabels " + "<\"label1(exclusive=true),label2(exclusive=false),label3\">] " + "[-removeFromClusterNodeLabels <label1,label2,label3>] " + "[-replaceLabelsOnNode " + "<\"node1[:port]=label1 node2[:port]=label2\"> " + "[-failOnUnknownNodes]] " + "[-directlyAccessNodeLabelStore] [-refreshClusterMaxPriority] " + "[-updateNodeResource [NodeID] [MemSize] [vCores] " + "([OvercommitTimeout]) or -updateNodeResource " + "[NodeID] [ResourceTypes] ([OvercommitTimeout])] " + "[-help [cmd]]"));
        assertTrue(dataOut.toString().contains("-refreshQueues: Reload the queues' acls, states and scheduler " + "specific properties."));
        assertTrue(dataOut.toString().contains("-refreshNodes [-g|graceful [timeout in seconds]" + " -client|server]: " + "Refresh the hosts information at the ResourceManager."));
        assertTrue(dataOut.toString().contains("-refreshNodesResources: Refresh resources of NodeManagers at the " + "ResourceManager."));
        assertTrue(dataOut.toString().contains("-refreshUserToGroupsMappings: Refresh user-to-groups mappings"));
        assertTrue(dataOut.toString().contains("-refreshSuperUserGroupsConfiguration: Refresh superuser proxy" + " groups mappings"));
        assertTrue(dataOut.toString().contains("-refreshAdminAcls: Refresh acls for administration of " + "ResourceManager"));
        assertTrue(dataOut.toString().contains("-refreshServiceAcl: Reload the service-level authorization" + " policy file"));
        assertTrue(dataOut.toString().contains("-help [cmd]: Displays help for the given command or all " + "commands if none"));
        // Per-command "-help <cmd>" usage strings (exit code 0 expected).
        testError(new String[] { "-help", "-refreshQueues" }, "Usage: yarn rmadmin [-refreshQueues]", dataErr, 0);
        testError(new String[] { "-help", "-refreshNodes" }, "Usage: yarn rmadmin [-refreshNodes [-g|graceful " + "[timeout in seconds] -client|server]]", dataErr, 0);
        testError(new String[] { "-help", "-refreshNodesResources" }, "Usage: yarn rmadmin [-refreshNodesResources]", dataErr, 0);
        testError(new String[] { "-help", "-refreshUserToGroupsMappings" }, "Usage: yarn rmadmin [-refreshUserToGroupsMappings]", dataErr, 0);
        testError(new String[] { "-help", "-refreshSuperUserGroupsConfiguration" }, "Usage: yarn rmadmin [-refreshSuperUserGroupsConfiguration]", dataErr, 0);
        testError(new String[] { "-help", "-refreshAdminAcls" }, "Usage: yarn rmadmin [-refreshAdminAcls]", dataErr, 0);
        testError(new String[] { "-help", "-refreshServiceAcl" }, "Usage: yarn rmadmin [-refreshServiceAcl]", dataErr, 0);
        testError(new String[] { "-help", "-getGroups" }, "Usage: yarn rmadmin [-getGroups [username]]", dataErr, 0);
        testError(new String[] { "-help", "-transitionToActive" }, "Usage: yarn rmadmin [-transitionToActive [--forceactive]" + " <serviceId>]", dataErr, 0);
        testError(new String[] { "-help", "-transitionToStandby" }, "Usage: yarn rmadmin [-transitionToStandby <serviceId>]", dataErr, 0);
        testError(new String[] { "-help", "-getServiceState" }, "Usage: yarn rmadmin [-getServiceState <serviceId>]", dataErr, 0);
        testError(new String[] { "-help", "-checkHealth" }, "Usage: yarn rmadmin [-checkHealth <serviceId>]", dataErr, 0);
        // Unknown command after -help still prints usage with exit code 0;
        // a bare unknown command is an error (exit code -1).
        testError(new String[] { "-help", "-badParameter" }, "Usage: yarn rmadmin", dataErr, 0);
        testError(new String[] { "-badParameter" }, "badParameter: Unknown command", dataErr, -1);
        // Repeat -help against the HA-enabled CLI; its usage string includes
        // the HA-only commands (transitionTo*, getServiceState, checkHealth).
        assertEquals(0, rmAdminCLIWithHAEnabled.run(args));
        oldOutPrintStream.println(dataOut);
        String expectedHelpMsg = "yarn rmadmin [-refreshQueues] [-refreshNodes [-g|graceful " + "[timeout in seconds] -client|server]] " + "[-refreshNodesResources] [-refreshSuperUserGroupsConfiguration] " + "[-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" + " [username]] [-addToClusterNodeLabels <\"label1(exclusive=true)," + "label2(exclusive=false),label3\">]" + " [-removeFromClusterNodeLabels <label1,label2,label3>] [-replaceLabelsOnNode " + "<\"node1[:port]=label1 node2[:port]=label2\"> " + "[-failOnUnknownNodes]] [-directlyAccessNodeLabelStore] " + "[-refreshClusterMaxPriority] " + "[-updateNodeResource [NodeID] [MemSize] [vCores] " + "([OvercommitTimeout]) " + "or -updateNodeResource [NodeID] [ResourceTypes] " + "([OvercommitTimeout])] " + "[-transitionToActive [--forceactive] <serviceId>] " + "[-transitionToStandby <serviceId>] " + "[-getServiceState <serviceId>] [-getAllServiceState] " + "[-checkHealth <serviceId>] [-help [cmd]]";
        String actualHelpMsg = dataOut.toString();
        assertTrue(String.format("Help messages: %n " + actualHelpMsg + " %n doesn't include expected " + "messages: %n" + expectedHelpMsg), actualHelpMsg.contains(expectedHelpMsg));
    } finally {
        // Always restore the real stdout/stderr for subsequent tests.
        System.setOut(oldOutPrintStream);
        System.setErr(oldErrPrintStream);
    }
}
291769.7428102hadoop
 void genCurElementMethod(String className, Method method, int indent) {
    // Emits an @Override convenience overload of {@code method} for the
    // element class {@code className}, dispatching on the method's name and
    // arity to choose the generated body.
    String name = method.getName();
    Class<?>[] argTypes = method.getParameterTypes();
    // Method header is emitted unless this is a no-arg method in child mode.
    if (topMode || argTypes.length > 0) {
        echo(indent, "\n", "@Override\n", "public ", className, topMode ? " " : "<T> ", name, "(");
    }
    switch (argTypes.length) {
        case 0:
            if (topMode) {
                puts(0, ") {");
                puts(indent, "  return this;\n", "}");
            }
            break;
        case 1:
            switch (name) {
                case "base":
                    puts(0, "String href) {");
                    puts(indent, "  return base().$href(href).__();\n", "}");
                    break;
                case "script":
                    puts(0, "String src) {");
                    puts(indent, "  return setScriptSrc(script(), src).__();\n", "}");
                    break;
                case "style":
                    puts(0, "Object... lines) {");
                    puts(indent, "  return style().$type(\"text/css\").__(lines).__();\n", "}");
                    break;
                case "img":
                    puts(0, "String src) {");
                    puts(indent, "  return ", name, "().$src(src).__();\n", "}");
                    break;
                case "br":
                case "hr":
                case "col":
                    puts(0, "String selector) {");
                    puts(indent, "  return setSelector(", name, "(), selector).__();\n", "}");
                    break;
                case "link":
                    puts(0, "String href) {");
                    puts(indent, "  return setLinkHref(", name, "(), href).__();\n", "}");
                    break;
                case "__":
                    // "__" either closes with a SubView class or appends text.
                    if (argTypes[0].getSimpleName().equals("Class")) {
                        puts(0, "Class<? extends SubView> cls) {");
                        puts(indent, "  ", topMode ? "subView" : "_v", "(cls);\n", "  return this;\n", "}");
                    } else {
                        puts(0, "Object... lines) {");
                        puts(indent, "  _p(", needsEscaping(className), ", lines);\n", "  return this;\n", "}");
                    }
                    break;
                case "_r":
                    // Raw (unescaped) text append.
                    puts(0, "Object... lines) {");
                    puts(indent, "  _p(false, lines);\n", "  return this;\n", "}");
                    break;
                default:
                    // Generic single-arg element: wrap the cdata and close.
                    puts(0, "String cdata) {");
                    puts(indent, "  return ", name, "().__(cdata).__();\n", "}");
                    break;
            }
            break;
        case 2:
            switch (name) {
                case "meta":
                    puts(0, "String name, String content) {");
                    puts(indent, "  return meta().$name(name).$content(content).__();\n", "}");
                    break;
                case "meta_http":
                    puts(0, "String header, String content) {");
                    puts(indent, "  return meta().$http_equiv(header).$content(content).__();\n", "}");
                    break;
                case "a":
                    puts(0, "String href, String anchorText) {");
                    puts(indent, "  return a().$href(href).__(anchorText).__();\n", "}");
                    break;
                case "bdo":
                    puts(0, "Dir dir, String cdata) {");
                    puts(indent, "  return bdo().$dir(dir).__(cdata).__();\n", "}");
                    break;
                case "label":
                    puts(0, "String forId, String cdata) {");
                    puts(indent, "  return label().$for(forId).__(cdata).__();\n", "}");
                    break;
                case "param":
                    puts(0, "String name, String value) {");
                    puts(indent, "  return param().$name(name).$value(value).__();\n", "}");
                    break;
                default:
                    // Generic two-arg element: selector plus cdata.
                    puts(0, "String selector, String cdata) {");
                    puts(indent, "  return setSelector(", name, "(), selector).__(cdata).__();\n", "}");
                    break;
            }
            break;
        case 3:
            if (name.equals("a")) {
                puts(0, "String selector, String href, String anchorText) {");
                puts(indent, "  return setSelector(a(), selector)", ".$href(href).__(anchorText).__();\n", "}");
            }
            break;
        default:
            throwUnhandled(className, method);
    }
}
294240.732117hadoop
protected void loadTestEntityData() throws IOException {
    // Populates the timeline store with a fixed set of entities, events,
    // filters, and related-entity links used by the read-path tests, and
    // asserts the expected per-put error responses along the way. The put
    // order matters: later puts rely on start times set by earlier ones.
    beforeTs = System.currentTimeMillis() - 1;
    TimelineEntities entities = new TimelineEntities();
    // Primary filters: one value of each interesting type (String, int,
    // numeric-looking String, long just past Integer.MAX_VALUE).
    Map<String, Set<Object>> primaryFilters = new HashMap<String, Set<Object>>();
    Set<Object> l1 = new HashSet<Object>();
    l1.add("username");
    Set<Object> l2 = new HashSet<Object>();
    l2.add(Integer.MAX_VALUE);
    Set<Object> l3 = new HashSet<Object>();
    l3.add("123abc");
    Set<Object> l4 = new HashSet<Object>();
    l4.add((long) Integer.MAX_VALUE + 1l);
    primaryFilters.put("user", l1);
    primaryFilters.put("appname", l2);
    primaryFilters.put("other", l3);
    primaryFilters.put("long", l4);
    Map<String, Object> secondaryFilters = new HashMap<String, Object>();
    secondaryFilters.put("startTime", 123456);
    secondaryFilters.put("status", "RUNNING");
    Map<String, Object> otherInfo1 = new HashMap<String, Object>();
    otherInfo1.put("info1", "val1");
    otherInfo1.putAll(secondaryFilters);
    String entityId1 = "id_1";
    String entityType1 = "type_1";
    // Note: entityId1b and entityId2 are both the literal "id_2" but are used
    // with different entity types (type_1 vs type_2).
    String entityId1b = "id_2";
    String entityId2 = "id_2";
    String entityType2 = "type_2";
    String entityId4 = "id_4";
    String entityType4 = "type_4";
    String entityId5 = "id_5";
    String entityType5 = "type_5";
    String entityId6 = "id_6";
    String entityId7 = "id_7";
    String entityType7 = "type_7";
    Map<String, Set<String>> relatedEntities = new HashMap<String, Set<String>>();
    relatedEntities.put(entityType2, Collections.singleton(entityId2));
    TimelineEvent ev3 = createEvent(789l, "launch_event", null);
    TimelineEvent ev4 = createEvent(0l, "init_event", null);
    List<TimelineEvent> events = new ArrayList<TimelineEvent>();
    events.add(ev3);
    events.add(ev4);
    // Put the related target entity (id_2/type_2) first so later puts that
    // reference it as a related entity succeed without error.
    entities.setEntities(Collections.singletonList(createEntity(entityId2, entityType2, null, events, null, null, null, "domain_id_1")));
    TimelinePutResponse response = store.put(entities);
    assertEquals(0, response.getErrors().size());
    TimelineEvent ev1 = createEvent(123l, "start_event", null);
    // id_1/type_1 with an explicit start time of 123.
    entities.setEntities(Collections.singletonList(createEntity(entityId1, entityType1, 123l, Collections.singletonList(ev1), relatedEntities, primaryFilters, otherInfo1, "domain_id_1")));
    response = store.put(entities);
    assertEquals(0, response.getErrors().size());
    // id_2/type_1 with no explicit start time (derived from its event).
    entities.setEntities(Collections.singletonList(createEntity(entityId1b, entityType1, null, Collections.singletonList(ev1), relatedEntities, primaryFilters, otherInfo1, "domain_id_1")));
    response = store.put(entities);
    assertEquals(0, response.getErrors().size());
    Map<String, Object> eventInfo = new HashMap<String, Object>();
    eventInfo.put("event info 1", "val1");
    TimelineEvent ev2 = createEvent(456l, "end_event", eventInfo);
    Map<String, Object> otherInfo2 = new HashMap<String, Object>();
    otherInfo2.put("info2", "val2");
    // Second puts for id_1 and id_2 add the end_event and extra otherInfo.
    entities.setEntities(Collections.singletonList(createEntity(entityId1, entityType1, null, Collections.singletonList(ev2), null, primaryFilters, otherInfo2, "domain_id_1")));
    response = store.put(entities);
    assertEquals(0, response.getErrors().size());
    entities.setEntities(Collections.singletonList(createEntity(entityId1b, entityType1, 789l, Collections.singletonList(ev2), null, primaryFilters, otherInfo2, "domain_id_1")));
    response = store.put(entities);
    assertEquals(0, response.getErrors().size());
    // An entity with neither a start time nor events must be rejected with
    // NO_START_TIME.
    entities.setEntities(Collections.singletonList(createEntity("badentityid", "badentity", null, null, null, null, otherInfo1, "domain_id_1")));
    response = store.put(entities);
    assertEquals(1, response.getErrors().size());
    TimelinePutError error = response.getErrors().get(0);
    assertEquals("badentityid", error.getEntityId());
    assertEquals("badentity", error.getEntityType());
    assertEquals(TimelinePutError.NO_START_TIME, error.getErrorCode());
    // Relate id_4 -> id_5 (response not asserted for these two puts).
    relatedEntities.clear();
    relatedEntities.put(entityType5, Collections.singleton(entityId5));
    entities.setEntities(Collections.singletonList(createEntity(entityId4, entityType4, 42l, null, relatedEntities, null, null, "domain_id_1")));
    response = store.put(entities);
    relatedEntities.clear();
    otherInfo1.put("info2", "val2");
    // id_6 is stored under a different domain (domain_id_2).
    entities.setEntities(Collections.singletonList(createEntity(entityId6, entityType1, 61l, null, relatedEntities, primaryFilters, otherInfo1, "domain_id_2")));
    response = store.put(entities);
    // id_7 (domain_id_2) relates to id_1 (domain_id_1): relating across
    // domains must fail with FORBIDDEN_RELATION.
    relatedEntities.clear();
    relatedEntities.put(entityType1, Collections.singleton(entityId1));
    entities.setEntities(Collections.singletonList(createEntity(entityId7, entityType7, 62l, null, relatedEntities, null, null, "domain_id_2")));
    response = store.put(entities);
    assertEquals(1, response.getErrors().size());
    assertEquals(entityType7, response.getErrors().get(0).getEntityType());
    assertEquals(entityId7, response.getErrors().get(0).getEntityId());
    assertEquals(TimelinePutError.FORBIDDEN_RELATION, response.getErrors().get(0).getErrorCode());
    // For leveldb-backed stores, also exercise the legacy no-domain put path.
    if (store instanceof LeveldbTimelineStore) {
        LeveldbTimelineStore leveldb = (LeveldbTimelineStore) store;
        entities.setEntities(Collections.singletonList(createEntity("OLD_ENTITY_ID_1", "OLD_ENTITY_TYPE_1", 63l, null, null, null, null, null)));
        leveldb.putWithNoDomainId(entities);
        entities.setEntities(Collections.singletonList(createEntity("OLD_ENTITY_ID_2", "OLD_ENTITY_TYPE_1", 64l, null, null, null, null, null)));
        leveldb.putWithNoDomainId(entities);
    }
}
292331.7815119hadoop
/**
 * REST endpoint that streams the contents of one log file belonging to a
 * container on this NodeManager.
 *
 * Flow: validate the container id; probe whether the container is still
 * RUNNING on this node (best effort); locate the local log file, redirecting
 * to {@code redirectWSUrl} when the file is not found locally and a redirect
 * target is configured; then stream the file followed by an end-of-log
 * trailer and, best-effort, any aggregated logs for the same container.
 *
 * @param containerIdStr container id path parameter; a 400 response is
 *                       returned when it does not parse
 * @param filename       name of the log file to serve
 * @param format         optional response content format; a 400 response is
 *                       returned for unsupported values
 * @param size           optional byte limit, parsed by {@code parseLongParam}
 * @return a streaming 200 response, or 400/404/redirect/500 as described above
 */
public Response getLogs(@PathParam(YarnWebServiceParams.CONTAINER_ID) final String containerIdStr, @PathParam(YarnWebServiceParams.CONTAINER_LOG_FILE_NAME) String filename, @QueryParam(YarnWebServiceParams.RESPONSE_CONTENT_FORMAT) String format, @QueryParam(YarnWebServiceParams.RESPONSE_CONTENT_SIZE) String size) {
    // Reject malformed container ids before doing any other work.
    ContainerId tempContainerId;
    try {
        tempContainerId = ContainerId.fromString(containerIdStr);
    } catch (IllegalArgumentException ex) {
        return Response.status(Status.BAD_REQUEST).build();
    }
    final ContainerId containerId = tempContainerId;
    // Best-effort liveness probe: a missing container (NPE from the null
    // map lookup) or any other failure just leaves isRunning == false.
    boolean tempIsRunning = false;
    try {
        Container container = nmContext.getContainers().get(containerId);
        tempIsRunning = (container.getContainerState() == ContainerState.RUNNING);
    } catch (Exception ex) {
        LOG.debug("Can not find the container:{} in this node.", containerId);
    }
    final boolean isRunning = tempIsRunning;
    File logFile = null;
    try {
        logFile = ContainerLogsUtils.getContainerLogFile(containerId, filename, request.getRemoteUser(), nmContext);
    } catch (NotFoundException ex) {
        // Not on this node: either report 404 or redirect the client to the
        // configured remote log web service.
        if (redirectWSUrl == null || redirectWSUrl.isEmpty()) {
            return Response.status(Status.NOT_FOUND).entity(ex.getMessage()).build();
        }
        String redirectURI = "/containers/" + containerIdStr + "/logs/" + filename;
        return createRedirectResponse(request, redirectWSUrl, redirectURI);
    } catch (YarnException ex) {
        return Response.serverError().entity(ex.getMessage()).build();
    }
    final long bytes = parseLongParam(size);
    final String lastModifiedTime = Times.format(logFile.lastModified());
    final String outputFileName = filename;
    // Resolve the response content type; an unsupported explicit format is a
    // client error.
    String contentType = WebAppUtils.getDefaultLogContentType();
    if (format != null && !format.isEmpty()) {
        contentType = WebAppUtils.getSupportedLogContentType(format);
        if (contentType == null) {
            String errorMessage = "The valid values for the parameter : format " + "are " + WebAppUtils.listSupportedLogContentType();
            return Response.status(Status.BAD_REQUEST).entity(errorMessage).build();
        }
    }
    try {
        // The stream is opened here but consumed (and closed) later, inside
        // the StreamingOutput callback when the container writes the entity.
        final FileInputStream fis = ContainerLogsUtils.openLogFileForRead(containerIdStr, logFile, nmContext);
        final long fileLength = logFile.length();
        StreamingOutput stream = new StreamingOutput() {

            @Override
            public void write(OutputStream os) throws IOException, WebApplicationException {
                try {
                    // Emit the local log file via zero-copy transfer.
                    LogToolUtils.outputContainerLogThroughZeroCopy(containerId.toString(), nmContext.getNodeId().toString(), outputFileName, fileLength, bytes, lastModifiedTime, fis, os, ContainerLogAggregationType.LOCAL);
                    // Append an end-of-log trailer, warning when the container
                    // is still running and its log may therefore be truncated.
                    StringBuilder sb = new StringBuilder();
                    String endOfFile = "End of LogType:" + outputFileName;
                    sb.append(endOfFile + ".");
                    if (isRunning) {
                        sb.append("This log file belongs to a running container (" + containerIdStr + ") and so may not be complete." + "\n");
                    } else {
                        sb.append("\n");
                    }
                    sb.append(StringUtils.repeat("*", endOfFile.length() + 50) + "\n\n");
                    os.write(sb.toString().getBytes(StandardCharsets.UTF_8));
                    // Best effort: also append the aggregated copy of this
                    // log, swallowing failures (aggregation may not exist).
                    ApplicationId appId = containerId.getApplicationAttemptId().getApplicationId();
                    Application app = nmContext.getApplications().get(appId);
                    String appOwner = app == null ? null : app.getUser();
                    try {
                        ContainerLogsRequest logRequest = new ContainerLogsRequest();
                        logRequest.setAppId(appId);
                        logRequest.setAppOwner(appOwner);
                        logRequest.setContainerId(containerId.toString());
                        logRequest.setNodeId(nmContext.getNodeId().toString());
                        logRequest.setBytes(bytes);
                        Set<String> logTypes = new HashSet<>();
                        logTypes.add(outputFileName);
                        logRequest.setLogTypes(logTypes);
                        factory.getFileControllerForRead(appId, appOwner).readAggregatedLogs(logRequest, os);
                    } catch (Exception ex) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Can not access the aggregated log for " + "the container:" + containerId);
                            LOG.debug(ex.getMessage());
                        }
                    }
                } finally {
                    // Always release the file stream opened above.
                    IOUtils.closeStream(fis);
                }
            }
        };
        ResponseBuilder resp = Response.ok(stream);
        resp.header("Content-Type", contentType + "; " + JettyUtils.UTF_8);
        // Prevent browsers from MIME-sniffing log content.
        resp.header("X-Content-Type-Options", "nosniff");
        return resp.build();
    } catch (IOException ex) {
        return Response.serverError().entity(ex.getMessage()).build();
    }
}
293323.883118hadoop
/**
 * Verifies that, with work-preserving RM restart and keep-containers enabled,
 * containers surviving from a failed app attempt are reported to the new AM:
 * containers recovered before AM registration arrive via
 * {@code RegisterApplicationMasterResponse}, and containers recovered later
 * arrive through subsequent allocate heartbeats, each with matching NM tokens.
 *
 * NOTE: statement order is significant throughout (heartbeats, waits and
 * registrations drive the RM state machine) — do not reorder.
 */
public void testContainersFromPreviousAttemptsWithRMRestart() throws Exception {
    // Enable recovery with a memory-backed state store so rm2 can resume
    // rm1's state.
    getConf().setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
    getConf().setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
    getConf().setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
    getConf().setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
    getConf().set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    MockRM rm1 = new MockRM(getConf());
    MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
    rm1.start();
    YarnScheduler scheduler = rm1.getResourceScheduler();
    String nm1Address = "127.0.0.1:1234";
    MockNM nm1 = new MockNM(nm1Address, 10240, rm1.getResourceTrackerService());
    nm1.registerNode();
    String nm2Address = "127.0.0.1:2351";
    MockNM nm2 = new MockNM(nm2Address, 4089, rm1.getResourceTrackerService());
    nm2.registerNode();
    // keepContainers=true is the property under test: running containers
    // must outlive a failed attempt.
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(200, rm1).withAppName("name").withUser("user").withAcls(new HashMap<>()).withUnmanagedAM(false).withQueue("default").withMaxAppAttempts(-1).withCredentials(null).withAppType("MAPREDUCE").withWaitForAppAcceptedState(false).withKeepContainers(true).build();
    RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    // One worker container on each NM (containers 2 and 3; container 1 is
    // the AM container on nm1).
    allocateContainers(nm1, am1, 1);
    allocateContainers(nm2, am1, 1);
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
    ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
    nm2.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING);
    ContainerId containerId3 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
    rm1.waitForState(nm2, containerId3, RMContainerState.RUNNING);
    // Completing the AM container (id 1) fails the first attempt.
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
    rm1.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FAILED);
    TestSchedulerUtils.waitSchedulerApplicationAttemptStopped((AbstractYarnScheduler) scheduler, am1.getApplicationAttemptId());
    // Restart the RM from the shared memory store; only nm1 re-registers
    // (with container 2) before the new attempt's AM comes up.
    MockRM rm2 = new MockRM(getConf(), memStore);
    rm2.start();
    nm1.setResourceTrackerService(rm2.getResourceTrackerService());
    NMContainerStatus container2Status = TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
    nm1.registerNode(Lists.newArrayList(container2Status), null);
    // NOTE(review): fixed sleep presumably gives rm2 time to process nm1's
    // re-registration/recovery before the next heartbeat — confirm.
    Thread.sleep(3000);
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
    rm2.waitForState(nm1, containerId2, RMContainerState.RUNNING);
    Assert.assertNotNull(rm2.getResourceScheduler().getRMContainer(containerId2));
    rm2.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
    // A brand-new attempt must have been created after the restart.
    ApplicationAttemptId newAttemptId = app1.getCurrentAppAttempt().getAppAttemptId();
    Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId()));
    MockAM am2 = MockRM.launchAMWhenAsyncSchedulingEnabled(app1, rm2);
    RegisterApplicationMasterResponse registerResponse = am2.registerAppAttempt();
    // Container 2 (recovered before registration) and nm1's token must be
    // delivered in the registration response.
    Assert.assertEquals(1, registerResponse.getContainersFromPreviousAttempts().size());
    Assert.assertEquals("container 2", containerId2, registerResponse.getContainersFromPreviousAttempts().get(0).getId());
    List<NMToken> prevNMTokens = registerResponse.getNMTokensFromPreviousAttempts();
    Assert.assertEquals(1, prevNMTokens.size());
    Assert.assertEquals(nm1Address, prevNMTokens.get(0).getNodeId().toString());
    rm2.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
    // Now nm2 re-registers (with container 3) AFTER am2 has registered.
    nm2.setResourceTrackerService(rm2.getResourceTrackerService());
    NMContainerStatus container3Status = TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING);
    nm2.registerNode(Lists.newArrayList(container3Status), null);
    nm2.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING);
    rm2.waitForState(nm2, containerId3, RMContainerState.RUNNING);
    Assert.assertNotNull(rm2.getResourceScheduler().getRMContainer(containerId3));
    // Container 3 must instead surface through an allocate heartbeat, again
    // with nm2's token and no newly allocated containers.
    List<Container> containersFromPreviousAttempts = new ArrayList<>();
    GenericTestUtils.waitFor(() -> {
        try {
            AllocateResponse allocateResponse = am2.doHeartbeat();
            if (allocateResponse.getContainersFromPreviousAttempts().size() > 0) {
                containersFromPreviousAttempts.addAll(allocateResponse.getContainersFromPreviousAttempts());
                Assert.assertEquals("new containers should not be allocated", 0, allocateResponse.getAllocatedContainers().size());
                List<NMToken> nmTokens = allocateResponse.getNMTokens();
                Assert.assertEquals(1, nmTokens.size());
                Assert.assertEquals(nm2Address, nmTokens.get(0).getNodeId().toString());
                return true;
            }
        } catch (Exception e) {
            Throwables.throwIfUnchecked(e);
            throw new RuntimeException(e);
        }
        return false;
    }, 2000, 200000);
    Assert.assertEquals("container 3", containerId3, containersFromPreviousAttempts.get(0).getId());
    rm2.stop();
    rm1.stop();
}
293826.169103hadoop
/**
 * Configures a mocked {@link CSQueue} from its textual expression {@code q}
 * and wires it into the mocked scheduler/queue hierarchy.
 *
 * The expression carries per-partition capacity settings of the form
 * {@code partition=[guaranteed max used pending (reserved)]} inside
 * parentheses; each value is parsed via {@code parseResourceFromString}.
 *
 * NOTE: Mockito stubbing order matters here — a later
 * {@code when(...).thenReturn(...)} on the same call replaces the earlier
 * one (e.g. {@code getQueuePath()} is stubbed twice; for root both stubs
 * return ROOT, for non-root only the second stub is effective, and
 * {@code getPreemptionDisabled()} may be re-stubbed from "disable_preemption").
 *
 * @param queue          the mocked queue to configure
 * @param q              the queue expression for this queue
 * @param queueExprArray all queue expressions, used to resolve the parent
 *                       and to decide whether this queue is a leaf
 * @param idx            index of {@code q} within {@code queueExprArray}
 */
private void setupQueue(CSQueue queue, String q, String[] queueExprArray, int idx) {
    LOG.debug("*** Setup queue, source=" + q);
    String queuePath = null;
    int myLevel = getLevel(q);
    if (0 == myLevel) {
        // Root queue: its path is fixed.
        when(queue.getQueuePath()).thenReturn(ROOT);
        queuePath = ROOT;
    }
    String queueName = getQueueName(q);
    when(queue.getQueueName()).thenReturn(queueName);
    // Link to the parent (if any) and derive the full dotted path from it.
    ParentQueue parentQueue = getParentQueue(queueExprArray, idx, myLevel);
    if (null != parentQueue) {
        when(queue.getParent()).thenReturn(parentQueue);
        parentQueue.getChildQueues().add(queue);
        queuePath = parentQueue.getQueuePath() + "." + queueName;
    }
    when(queue.getQueuePath()).thenReturn(queuePath);
    // Fresh capacity/usage/quota holders backing the mocked accessors below.
    QueueCapacities qc = new QueueCapacities(0 == myLevel);
    ResourceUsage ru = new ResourceUsage();
    QueueResourceQuotas qr = new QueueResourceQuotas();
    when(queue.getQueueCapacities()).thenReturn(qc);
    when(queue.getQueueResourceUsage()).thenReturn(ru);
    when(queue.getQueueResourceQuotas()).thenReturn(qr);
    LOG.debug("Setup queue, short name=" + queue.getQueueName() + " path=" + queue.getQueuePath());
    LOG.debug("Parent=" + (parentQueue == null ? "null" : parentQueue.getQueuePath()));
    // Parse the "(partition=[...],partition=[...])" capacity section.
    String capacitySettingStr = q.substring(q.indexOf("(") + 1, q.indexOf(")"));
    for (String s : capacitySettingStr.split(",")) {
        String partitionName = s.substring(0, s.indexOf("="));
        // values = [guaranteed, max, used, pending, (reserved)]
        String[] values = s.substring(s.indexOf("[") + 1, s.indexOf("]")).split(" ");
        // Epsilon keeps ratio comparisons from failing on exact boundaries.
        float epsilon = 1e-6f;
        Resource toResourcePerPartition = partitionToResource.get(partitionName);
        float absGuaranteed = Resources.divide(resourceCalculator, toResourcePerPartition, parseResourceFromString(values[0].trim()), toResourcePerPartition) + epsilon;
        float absMax = Resources.divide(resourceCalculator, toResourcePerPartition, parseResourceFromString(values[1].trim()), toResourcePerPartition) + epsilon;
        float absUsed = Resources.divide(resourceCalculator, toResourcePerPartition, parseResourceFromString(values[2].trim()), toResourcePerPartition) + epsilon;
        // used capacity is relative to this queue's guaranteed resource.
        float used = Resources.divide(resourceCalculator, toResourcePerPartition, parseResourceFromString(values[2].trim()), parseResourceFromString(values[0].trim())) + epsilon;
        Resource pending = parseResourceFromString(values[3].trim());
        qc.setAbsoluteCapacity(partitionName, absGuaranteed);
        qc.setAbsoluteMaximumCapacity(partitionName, absMax);
        qc.setAbsoluteUsedCapacity(partitionName, absUsed);
        qc.setUsedCapacity(partitionName, used);
        qr.setEffectiveMaxResource(parseResourceFromString(values[1].trim()));
        qr.setEffectiveMinResource(parseResourceFromString(values[0].trim()));
        qr.setEffectiveMaxResource(partitionName, parseResourceFromString(values[1].trim()));
        qr.setEffectiveMinResource(partitionName, parseResourceFromString(values[0].trim()));
        when(queue.getUsedCapacity()).thenReturn(used);
        when(queue.getEffectiveCapacity(partitionName)).thenReturn(parseResourceFromString(values[0].trim()));
        when(queue.getEffectiveMaxCapacity(partitionName)).thenReturn(parseResourceFromString(values[1].trim()));
        ru.setPending(partitionName, pending);
        // Optional fifth value: reserved resource for this partition.
        Resource reserved = Resources.none();
        if (values.length == 5) {
            reserved = parseResourceFromString(values[4].trim());
            ru.setReserved(partitionName, reserved);
        }
        if (!isParent(queueExprArray, idx)) {
            // Leaf queues additionally expose pending resources with and
            // without the reserved portion deducted.
            LeafQueue lq = (LeafQueue) queue;
            when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(false))).thenReturn(pending);
            when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(true))).thenReturn(Resources.subtract(pending, reserved));
        }
        ru.setUsed(partitionName, parseResourceFromString(values[2].trim()));
        LOG.debug("Setup queue=" + queueName + " partition=" + partitionName + " [abs_guaranteed=" + absGuaranteed + ",abs_max=" + absMax + ",abs_used" + absUsed + ",pending_resource=" + pending + ", reserved_resource=" + reserved + "]");
    }
    when(queue.getPreemptionDisabled()).thenReturn(conf.getPreemptionDisabled(new QueuePath(queuePath), false));
    // Optional per-queue overrides parsed from the expression's trailing
    // configuration section.
    Map<String, String> otherConfigs = getOtherConfigurations(queueExprArray[idx]);
    if (otherConfigs.containsKey("priority")) {
        when(queue.getPriority()).thenReturn(Priority.newInstance(Integer.valueOf(otherConfigs.get("priority"))));
    } else {
        when(queue.getPriority()).thenReturn(Priority.newInstance(0));
    }
    if (otherConfigs.containsKey("disable_preemption")) {
        // Overrides the configuration-derived stub above (last stub wins).
        when(queue.getPreemptionDisabled()).thenReturn(Boolean.valueOf(otherConfigs.get("disable_preemption")));
    }
    // Register under both the full path and the short name so lookups via
    // either form resolve to this mock.
    nameToCSQueues.put(queuePath, queue);
    nameToCSQueues.put(queueName, queue);
    when(cs.getQueue(eq(queuePath))).thenReturn(queue);
    when(cs.getQueue(eq(queueName))).thenReturn(queue);
    when(cs.normalizeQueueName(eq(queuePath))).thenReturn(queuePath);
    when(cs.normalizeQueueName(eq(queueName))).thenReturn(queuePath);
}
295227.531108hadoop
/**
 * Checks that pending-resource increments on leaf queues roll up correctly
 * through a three-level queue hierarchy (root -> q1/q2 -> q11/q12/q21/q22)
 * across two node partitions ("x" and "y"), at the partition, queue and
 * per-user metric sources.
 */
public void testMultiplePartitionsWithMultiLevelQueuesMetrics() throws Exception {
    // Build the metrics hierarchy: mocks only supply queue name + metrics.
    String rootName = "root";
    Queue mockRoot = mock(Queue.class);
    QueueMetrics rootMetrics = QueueMetrics.forQueue(ms, rootName, null, true, CONF);
    when(mockRoot.getQueueName()).thenReturn(rootName);
    when(mockRoot.getMetrics()).thenReturn(rootMetrics);
    QueueMetrics q1Metrics = QueueMetrics.forQueue(ms, "root.q1", mockRoot, true, CONF);
    Queue mockQ1 = mock(Queue.class);
    when(mockQ1.getQueueName()).thenReturn("root.q1");
    when(mockQ1.getMetrics()).thenReturn(q1Metrics);
    QueueMetrics q11Metrics = QueueMetrics.forQueue(ms, "root.q1.q11", mockQ1, true, CONF);
    QueueMetrics q12Metrics = QueueMetrics.forQueue(ms, "root.q1.q12", mockQ1, true, CONF);
    QueueMetrics q2Metrics = QueueMetrics.forQueue(ms, "root.q2", mockRoot, true, CONF);
    Queue mockQ2 = mock(Queue.class);
    when(mockQ2.getQueueName()).thenReturn("root.q2");
    when(mockQ2.getMetrics()).thenReturn(q2Metrics);
    QueueMetrics q21Metrics = QueueMetrics.forQueue(ms, "root.q2.q21", mockQ2, true, CONF);
    QueueMetrics q22Metrics = QueueMetrics.forQueue(ms, "root.q2.q22", mockQ2, true, CONF);
    // Partition "x": available resources at root/q1/q11, then 2 pending
    // containers on q11 for test_user must appear at every ancestor.
    rootMetrics.setAvailableResourcesToQueue("x", Resources.createResource(200 * GB, 200));
    q1Metrics.setAvailableResourcesToQueue("x", Resources.createResource(100 * GB, 100));
    q11Metrics.setAvailableResourcesToQueue("x", Resources.createResource(50 * GB, 50));
    q11Metrics.incrPendingResources("x", "test_user", 2, Resource.newInstance(1024, 1));
    MetricsSource partXSource = partitionSource(ms, "x");
    MetricsSource rootXSource = queueSource(ms, "x", rootName);
    MetricsSource q1XSource = queueSource(ms, "x", "root.q1");
    MetricsSource q1UserXSource = userSource(ms, "x", "test_user", "root.q1");
    checkResources(partXSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
    checkResources(q1XSource, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2);
    checkResources(rootXSource, 0, 0, 0, 200 * GB, 200, 2 * GB, 2, 2);
    checkResources(q1XSource, 0, 0, 0, 100 * GB, 100, 2 * GB, 2, 2);
    checkResources(q1UserXSource, 0, 0, 0, 0 * GB, 0, 2 * GB, 2, 2);
    // 4 more for the same user: totals become 6 everywhere on the path.
    q11Metrics.incrPendingResources("x", "test_user", 4, Resource.newInstance(1024, 1));
    MetricsSource q11XSource = queueSource(ms, "x", "root.q1.q11");
    checkResources(partXSource, 0, 0, 0, 200 * GB, 200, 6 * GB, 6, 6);
    checkResources(rootXSource, 0, 0, 0, 200 * GB, 200, 6 * GB, 6, 6);
    checkResources(q11XSource, 0, 0, 0, 50 * GB, 50, 6 * GB, 6, 6);
    checkResources(q1XSource, 0, 0, 0, 100 * GB, 100, 6 * GB, 6, 6);
    checkResources(q1UserXSource, 0, 0, 0, 0 * GB, 0, 6 * GB, 6, 6);
    // A second user: queue totals aggregate both users, per-user sources
    // stay separate.
    q11Metrics.incrPendingResources("x", "test_user1", 5, Resource.newInstance(1024, 1));
    MetricsSource q1User1XSource = userSource(ms, "x", "test_user1", "root.q1");
    MetricsSource q11User1XSource = userSource(ms, "x", "test_user1", "root.q1.q11");
    checkResources(partXSource, 0, 0, 0, 200 * GB, 200, 11 * GB, 11, 11);
    checkResources(rootXSource, 0, 0, 0, 200 * GB, 200, 11 * GB, 11, 11);
    checkResources(q11XSource, 0, 0, 0, 50 * GB, 50, 11 * GB, 11, 11);
    checkResources(q1XSource, 0, 0, 0, 100 * GB, 100, 11 * GB, 11, 11);
    checkResources(q1UserXSource, 0, 0, 0, 0 * GB, 0, 6 * GB, 6, 6);
    checkResources(q1User1XSource, 0, 0, 0, 0 * GB, 0, 5 * GB, 5, 5);
    checkResources(q11User1XSource, 0, 0, 0, 0 * GB, 0, 5 * GB, 5, 5);
    // Sibling leaf q12 contributes to shared ancestors, not to q11.
    q12Metrics.incrPendingResources("x", "test_user", 5, Resource.newInstance(1024, 1));
    MetricsSource q12XSource = queueSource(ms, "x", "root.q1.q12");
    checkResources(partXSource, 0, 0, 0, 200 * GB, 200, 16 * GB, 16, 16);
    checkResources(rootXSource, 0, 0, 0, 200 * GB, 200, 16 * GB, 16, 16);
    checkResources(q1XSource, 0, 0, 0, 100 * GB, 100, 16 * GB, 16, 16);
    checkResources(q12XSource, 0, 0, 0, 0, 0, 5 * GB, 5, 5);
    // Partition "y" through the q1 branch is tracked independently of "x".
    rootMetrics.setAvailableResourcesToQueue("y", Resources.createResource(200 * GB, 200));
    q1Metrics.setAvailableResourcesToQueue("y", Resources.createResource(100 * GB, 100));
    q12Metrics.setAvailableResourcesToQueue("y", Resources.createResource(50 * GB, 50));
    q12Metrics.incrPendingResources("y", "test_user", 3, Resource.newInstance(1024, 1));
    MetricsSource partYSource = partitionSource(ms, "y");
    MetricsSource rootYSource = queueSource(ms, "y", rootName);
    MetricsSource q1YSource = queueSource(ms, "y", "root.q1");
    MetricsSource q12YSource = queueSource(ms, "y", "root.q1.q12");
    checkResources(partYSource, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3);
    checkResources(rootYSource, 0, 0, 0, 200 * GB, 200, 3 * GB, 3, 3);
    checkResources(q1YSource, 0, 0, 0, 100 * GB, 100, 3 * GB, 3, 3);
    checkResources(q12YSource, 0, 0, 0, 50 * GB, 50, 3 * GB, 3, 3);
    // Partition "y" through the q2 branch: root sees both branches (3+5=8),
    // q2 sees only its own 5.
    rootMetrics.setAvailableResourcesToQueue("y", Resources.createResource(200 * GB, 200));
    q2Metrics.setAvailableResourcesToQueue("y", Resources.createResource(100 * GB, 100));
    q21Metrics.setAvailableResourcesToQueue("y", Resources.createResource(50 * GB, 50));
    q21Metrics.incrPendingResources("y", "test_user", 5, Resource.newInstance(1024, 1));
    MetricsSource q21YSource = queueSource(ms, "y", "root.q2.q21");
    MetricsSource q2YSource = queueSource(ms, "y", "root.q2");
    checkResources(partYSource, 0, 0, 0, 200 * GB, 200, 8 * GB, 8, 8);
    checkResources(rootYSource, 0, 0, 0, 200 * GB, 200, 8 * GB, 8, 8);
    checkResources(q2YSource, 0, 0, 0, 100 * GB, 100, 5 * GB, 5, 5);
    checkResources(q21YSource, 0, 0, 0, 50 * GB, 50, 5 * GB, 5, 5);
    // Final increment on q22: partition/root totals reach 14.
    q22Metrics.incrPendingResources("y", "test_user", 6, Resource.newInstance(1024, 1));
    MetricsSource q22YSource = queueSource(ms, "y", "root.q2.q22");
    checkResources(partYSource, 0, 0, 0, 200 * GB, 200, 14 * GB, 14, 14);
    checkResources(rootYSource, 0, 0, 0, 200 * GB, 200, 14 * GB, 14, 14);
    checkResources(q22YSource, 0, 0, 0, 0, 0, 6 * GB, 6, 6);
}
294106.421111hadoop
/**
 * Verifies per-partition queue and user metrics across a three-level
 * hierarchy (root -> root.leaf -> root.leaf.leaf1): pending resources
 * incremented on the deepest queue for a single user must be visible, for
 * each of the partitions "x" and "y", at every queue level and at every
 * per-user source on the path, while available resources stay per-level.
 */
public void testThreeLevelWithUserMetrics() {
    String rootName = "root";
    String midQueueName = "root.leaf";
    String deepQueueName = "root.leaf.leaf1";
    String user = "alice";
    String partX = "x";
    String partY = "y";
    // Wire up the three-level metrics chain; mocks only expose name+metrics.
    QueueMetrics rootMetrics = QueueMetrics.forQueue(rootName, null, true, CONF);
    Queue mockRoot = mock(Queue.class);
    when(mockRoot.getQueueName()).thenReturn(rootName);
    when(mockRoot.getMetrics()).thenReturn(rootMetrics);
    QueueMetrics midMetrics = QueueMetrics.forQueue(midQueueName, mockRoot, true, CONF);
    Queue mockMid = mock(Queue.class);
    when(mockMid.getQueueName()).thenReturn(midQueueName);
    when(mockMid.getMetrics()).thenReturn(midMetrics);
    QueueMetrics deepMetrics = QueueMetrics.forQueue(deepQueueName, mockMid, true, CONF);
    AppSchedulingInfo app = mockApp(user);
    metrics1Submit(deepMetrics, user);
    // Per-level available resources for both partitions.
    parentAvail(rootMetrics, partX, 200, partY, 500);
    parentAvail(midMetrics, partX, 100, partY, 400);
    parentAvail(deepMetrics, partX, 50, partY, 300);
    // Per-user available resources for both partitions.
    userAvail(rootMetrics, partX, user, 20, partY, 50);
    userAvail(midMetrics, partX, user, 10, partY, 40);
    userAvail(deepMetrics, partX, user, 5, partY, 30);
    // 6 pending containers in each partition on the deepest queue.
    deepMetrics.incrPendingResources(partX, user, 6, Resources.createResource(3 * GB, 3));
    deepMetrics.incrPendingResources(partY, user, 6, Resources.createResource(4 * GB, 4));
    // Partition "x": 6 * 3GB = 18GB pending propagates to every level.
    MetricsSource partXSource = partitionSource(deepMetrics.getMetricsSystem(), partX);
    MetricsSource rootXSource = queueSource(deepMetrics.getMetricsSystem(), partX, rootName);
    MetricsSource midXSource = queueSource(deepMetrics.getMetricsSystem(), partX, midQueueName);
    MetricsSource deepXSource = queueSource(deepMetrics.getMetricsSystem(), partX, deepQueueName);
    MetricsSource rootUserXSource = userSource(deepMetrics.getMetricsSystem(), partX, user, rootName);
    MetricsSource midUserXSource = userSource(deepMetrics.getMetricsSystem(), partX, user, midQueueName);
    MetricsSource deepUserXSource = userSource(deepMetrics.getMetricsSystem(), partX, user, deepQueueName);
    checkResources(partXSource, 0, 0, 0, 0, 0, 200 * GB, 200, 18 * GB, 18, 6, 0, 0, 0);
    checkResources(rootXSource, 0, 0, 0, 0, 0, 200 * GB, 200, 18 * GB, 18, 6, 0, 0, 0);
    checkResources(midXSource, 0, 0, 0, 0, 0, 100 * GB, 100, 18 * GB, 18, 6, 0, 0, 0);
    checkResources(deepXSource, 0, 0, 0, 0, 0, 50 * GB, 50, 18 * GB, 18, 6, 0, 0, 0);
    checkResources(rootUserXSource, 0, 0, 0, 0, 0, 20 * GB, 20, 18 * GB, 18, 6, 0, 0, 0);
    checkResources(midUserXSource, 0, 0, 0, 0, 0, 10 * GB, 10, 18 * GB, 18, 6, 0, 0, 0);
    checkResources(deepUserXSource, 0, 0, 0, 0, 0, 5 * GB, 5, 18 * GB, 18, 6, 0, 0, 0);
    // Partition "y": 6 * 4GB = 24GB pending, independent of "x".
    MetricsSource partYSource = partitionSource(deepMetrics.getMetricsSystem(), partY);
    MetricsSource rootYSource = queueSource(deepMetrics.getMetricsSystem(), partY, rootName);
    MetricsSource midYSource = queueSource(deepMetrics.getMetricsSystem(), partY, midQueueName);
    MetricsSource deepYSource = queueSource(deepMetrics.getMetricsSystem(), partY, deepQueueName);
    MetricsSource rootUserYSource = userSource(deepMetrics.getMetricsSystem(), partY, user, rootName);
    MetricsSource midUserYSource = userSource(deepMetrics.getMetricsSystem(), partY, user, midQueueName);
    MetricsSource deepUserYSource = userSource(deepMetrics.getMetricsSystem(), partY, user, deepQueueName);
    checkResources(partYSource, 0, 0, 0, 0, 0, 500 * GB, 500, 24 * GB, 24, 6, 0, 0, 0);
    checkResources(rootYSource, 0, 0, 0, 0, 0, 500 * GB, 500, 24 * GB, 24, 6, 0, 0, 0);
    checkResources(midYSource, 0, 0, 0, 0, 0, 400 * GB, 400, 24 * GB, 24, 6, 0, 0, 0);
    checkResources(deepYSource, 0, 0, 0, 0, 0, 300 * GB, 300, 24 * GB, 24, 6, 0, 0, 0);
    checkResources(rootUserYSource, 0, 0, 0, 0, 0, 50 * GB, 50, 24 * GB, 24, 6, 0, 0, 0);
    checkResources(midUserYSource, 0, 0, 0, 0, 0, 40 * GB, 40, 24 * GB, 24, 6, 0, 0, 0);
    checkResources(deepUserYSource, 0, 0, 0, 0, 0, 30 * GB, 30, 24 * GB, 24, 6, 0, 0, 0);
    // Tear down the attempt/app so the metrics system is left clean.
    deepMetrics.finishAppAttempt(app.getApplicationId(), app.isPending(), app.getUser(), false);
    deepMetrics.finishApp(user, RMAppState.FINISHED, false);
}

/** Submits one app and one attempt for {@code user} against {@code m}. */
private static void metrics1Submit(QueueMetrics m, String user) {
    m.submitApp(user, false);
    m.submitAppAttempt(user, false);
}

/** Sets queue-level available resources for two partitions (GB == vcores). */
private static void parentAvail(QueueMetrics m, String pA, int gbA, String pB, int gbB) {
    m.setAvailableResourcesToQueue(pA, Resources.createResource(gbA * GB, gbA));
    m.setAvailableResourcesToQueue(pB, Resources.createResource(gbB * GB, gbB));
}

/** Sets user-level available resources for two partitions (GB == vcores). */
private static void userAvail(QueueMetrics m, String pA, String user, int gbA, String pB, int gbB) {
    m.setAvailableResourcesToUser(pA, user, Resources.createResource(gbA * GB, gbA));
    m.setAvailableResourcesToUser(pB, user, Resources.createResource(gbB * GB, gbB));
}
293116.958115hadoop
/**
 * Shared driver for AM-restart resource-accounting tests: runs an app whose
 * first attempt fails, launches a second attempt, then asserts that the
 * app-level memory-seconds and vcore-seconds equal the sum computed per
 * container — with or without keeping the first attempt's running containers.
 *
 * Defects fixed vs. previous version: removed the redundant trailing
 * {@code return;} and the non-final constant-cased counter; switched to the
 * diamond operator already used elsewhere in this file.
 *
 * @param keepRunningContainers whether containers of a failed attempt keep
 *                              running (and keep accruing usage) across the
 *                              attempt restart
 */
private void amRestartTests(boolean keepRunningContainers) throws Exception {
    MockRM rm = new MockRM(conf);
    rm.start();
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(200, rm).withAppName("name").withUser("user").withAcls(new HashMap<ApplicationAccessType, String>()).withUnmanagedAM(false).withQueue("default").withMaxAppAttempts(-1).withCredentials(null).withAppType("MAPREDUCE").withWaitForAppAcceptedState(false).withKeepContainers(keepRunningContainers).build();
    RMApp app = MockRMAppSubmitter.submit(rm, data);
    MockNM nm = new MockNM("127.0.0.1:1234", 10240, rm.getResourceTrackerService());
    nm.registerNode();
    MockAM am0 = MockRM.launchAndRegisterAM(app, rm, nm);
    final int numContainers = 1;
    // Request one worker container and heartbeat until it is allocated.
    am0.allocate("127.0.0.1", 1024, numContainers, new ArrayList<>());
    nm.nodeHeartbeat(true);
    List<Container> containers = am0.allocate(new ArrayList<>(), new ArrayList<>()).getAllocatedContainers();
    while (containers.size() != numContainers) {
        nm.nodeHeartbeat(true);
        containers.addAll(am0.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>()).getAllocatedContainers());
        Thread.sleep(200);
    }
    // Container 2 is the worker (container 1 is the AM container).
    ContainerId containerId2 = ContainerId.newContainerId(am0.getApplicationAttemptId(), 2);
    nm.nodeHeartbeat(am0.getApplicationAttemptId(), containerId2.getContainerId(), ContainerState.RUNNING);
    rm.waitForState(nm, containerId2, RMContainerState.RUNNING);
    Collection<RMContainer> rmContainers = rm.scheduler.getSchedulerAppInfo(am0.getApplicationAttemptId()).getLiveContainers();
    // Fail the first attempt by completing its AM container.
    ContainerId amContainerId = app.getCurrentAppAttempt().getMasterContainer().getId();
    nm.nodeHeartbeat(am0.getApplicationAttemptId(), amContainerId.getContainerId(), ContainerState.COMPLETE);
    rm.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.FAILED);
    rm.drainEvents();
    // Accumulate expected usage from the first attempt's containers.
    long memorySeconds = 0;
    long vcoreSeconds = 0;
    if (keepRunningContainers) {
        // Only the finished AM container is charged now; workers keep running
        // into the second attempt.
        for (RMContainer c : rmContainers) {
            if (c.getContainerId().equals(amContainerId)) {
                AggregateAppResourceUsage ru = calculateContainerResourceMetrics(c);
                memorySeconds += ru.getMemorySeconds();
                vcoreSeconds += ru.getVcoreSeconds();
            } else {
                Assert.assertTrue("After first attempt failed, remaining container " + "should still be running. ", c.getContainerState().equals(ContainerState.RUNNING));
            }
        }
    } else {
        // All first-attempt containers complete and are charged.
        for (RMContainer c : rmContainers) {
            MockRM.waitForContainerCompletion(rm, nm, amContainerId, c);
            AggregateAppResourceUsage ru = calculateContainerResourceMetrics(c);
            memorySeconds += ru.getMemorySeconds();
            vcoreSeconds += ru.getVcoreSeconds();
        }
    }
    // Launch and register the second attempt.
    rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
    RMAppAttempt attempt2 = app.getCurrentAppAttempt();
    Assert.assertFalse(attempt2.getAppAttemptId().equals(am0.getApplicationAttemptId()));
    rm.waitForState(attempt2.getAppAttemptId(), RMAppAttemptState.SCHEDULED);
    nm.nodeHeartbeat(true);
    MockAM am1 = rm.sendAMLaunched(attempt2.getAppAttemptId());
    am1.registerAppAttempt();
    rm.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.RUNNING);
    // Second attempt allocates its own worker container.
    am1.allocate("127.0.0.1", 1024, numContainers, new ArrayList<>());
    nm.nodeHeartbeat(true);
    containers = am1.allocate(new ArrayList<>(), new ArrayList<>()).getAllocatedContainers();
    while (containers.size() != numContainers) {
        nm.nodeHeartbeat(true);
        containers.addAll(am1.allocate(new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>()).getAllocatedContainers());
        Thread.sleep(200);
    }
    rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);
    rmContainers = rm.scheduler.getSchedulerAppInfo(attempt2.getAppAttemptId()).getLiveContainers();
    amContainerId = app.getCurrentAppAttempt().getMasterContainer().getId();
    // NOTE(review): this heartbeat completes attempt2's AM container but is
    // sent under am0's attempt id — looks intentional for the recovery path,
    // but confirm it should not be attempt2.getAppAttemptId().
    nm.nodeHeartbeat(am0.getApplicationAttemptId(), amContainerId.getContainerId(), ContainerState.COMPLETE);
    MockRM.finishAMAndVerifyAppState(app, rm, nm, am1);
    // Charge all second-attempt containers as they complete.
    for (RMContainer c : rmContainers) {
        MockRM.waitForContainerCompletion(rm, nm, amContainerId, c);
        AggregateAppResourceUsage ru = calculateContainerResourceMetrics(c);
        memorySeconds += ru.getMemorySeconds();
        vcoreSeconds += ru.getVcoreSeconds();
    }
    // The app-level metrics must match the per-container sums exactly.
    RMAppMetrics rmAppMetrics = app.getRMAppMetrics();
    Assert.assertEquals("Unexpected MemorySeconds value", memorySeconds, rmAppMetrics.getMemorySeconds());
    Assert.assertEquals("Unexpected VcoreSeconds value", vcoreSeconds, rmAppMetrics.getVcoreSeconds());
    rm.stop();
}
293270.544116hadoop
/**
 * Loads a fixed, well-known set of timeline entities into HBase so that
 * read-side tests can query against deterministic data.
 *
 * Writes: a generic entity "hello" (info, relations, configs, two metrics,
 * start event), a sibling "hello1" (two events, relations, one metric), the
 * entity from {@code getEntity2}, ten lightweight "typeTest" entities with
 * varying types, and two YARN application entities — each application is
 * written under its own collector context.
 *
 * @param util HBase mini-cluster utility supplying the configuration.
 * @param ts   base timestamp used for metric values and event timestamps.
 * @throws IOException if the timeline writer fails.
 */
public static void loadEntities(HBaseTestingUtility util, long ts) throws IOException {
    TimelineEntities te = new TimelineEntities();
    // Entity "hello" of type "world": fully populated (info, relations,
    // configs, a time-series metric and a single-value metric, start event).
    TimelineEntity entity = new TimelineEntity();
    String id = "hello";
    String type = "world";
    entity.setId(id);
    entity.setType(type);
    Long cTime = 1425016502000L;
    entity.setCreatedTime(cTime);
    entity.addInfo(getInfoMap1());
    entity.setIsRelatedToEntities(getIsRelatedTo1());
    entity.setRelatesToEntities(getRelatesTo1());
    entity.addConfigs(getConfig1());
    Set<TimelineMetric> metrics = new HashSet<>();
    TimelineMetric m1 = new TimelineMetric();
    m1.setId("MAP_SLOT_MILLIS");
    m1.setType(Type.TIME_SERIES);
    m1.setValues(getMetricValues1(ts));
    metrics.add(m1);
    TimelineMetric m12 = new TimelineMetric();
    m12.setId("MAP1_BYTES");
    m12.addValue(ts, 50);
    metrics.add(m12);
    entity.addMetrics(metrics);
    entity.addEvent(addStartEvent(ts));
    te.addEntity(entity);
    // Entity "hello1": end/update events, its own relations, configs and a
    // single time-series metric; created slightly after "hello".
    TimelineEntity entity1 = new TimelineEntity();
    String id1 = "hello1";
    entity1.setId(id1);
    entity1.setType(type);
    entity1.setCreatedTime(cTime + 20L);
    entity1.addInfo(getInfoMap2());
    TimelineEvent event11 = new TimelineEvent();
    event11.setId("end_event");
    event11.setTimestamp(ts);
    entity1.addEvent(event11);
    TimelineEvent event12 = new TimelineEvent();
    event12.setId("update_event");
    event12.setTimestamp(ts - 10);
    entity1.addEvent(event12);
    entity1.setIsRelatedToEntities(getIsRelatedTo2());
    Set<String> relatesToSet1 = new HashSet<String>();
    relatesToSet1.add("relatesto1");
    relatesToSet1.add("relatesto2");
    Map<String, Set<String>> relatesTo1 = new HashMap<>();
    relatesTo1.put("container", relatesToSet1);
    entity1.setRelatesToEntities(relatesTo1);
    entity1.addConfigs(getConfig2());
    Set<TimelineMetric> metrics1 = new HashSet<>();
    TimelineMetric m2 = new TimelineMetric();
    m2.setId("MAP1_SLOT_MILLIS");
    m2.setType(Type.TIME_SERIES);
    m2.setValues(getMetricValues2(ts));
    metrics1.add(m2);
    entity1.addMetrics(metrics1);
    te.addEntity(entity1);
    te.addEntity(getEntity2(type, cTime, ts));
    // Ten minimal entities whose type cycles through three variants
    // ("newType", "newType 0", "newType 0 1") for entity-type queries.
    for (int i = 0; i < 10; i++) {
        TimelineEntity entity3 = new TimelineEntity();
        String id3 = "typeTest" + i;
        entity3.setId(id3);
        StringBuilder typeName = new StringBuilder("newType");
        for (int j = 0; j < (i % 3); j++) {
            typeName.append(" ").append(j);
        }
        entity3.setType(typeName.toString());
        entity3.setCreatedTime(cTime + 80L + i);
        te.addEntity(entity3);
    }
    // Two YARN application entities, each with a CREATED event, written
    // under separate collector contexts below.
    TimelineEntities appTe1 = new TimelineEntities();
    TimelineEntity entityApp1 = new TimelineEntity();
    String appName1 = "application_1231111111_1111";
    entityApp1.setId(appName1);
    entityApp1.setType(TimelineEntityType.YARN_APPLICATION.toString());
    entityApp1.setCreatedTime(cTime + 40L);
    TimelineEvent appCreationEvent1 = new TimelineEvent();
    appCreationEvent1.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
    appCreationEvent1.setTimestamp(cTime);
    entityApp1.addEvent(appCreationEvent1);
    appTe1.addEntity(entityApp1);
    TimelineEntities appTe2 = new TimelineEntities();
    TimelineEntity entityApp2 = new TimelineEntity();
    String appName2 = "application_1231111111_1112";
    entityApp2.setId(appName2);
    entityApp2.setType(TimelineEntityType.YARN_APPLICATION.toString());
    entityApp2.setCreatedTime(cTime + 50L);
    TimelineEvent appCreationEvent2 = new TimelineEvent();
    appCreationEvent2.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
    appCreationEvent2.setTimestamp(cTime);
    entityApp2.addEvent(appCreationEvent2);
    appTe2.addEntity(entityApp2);
    HBaseTimelineWriterImpl hbi = null;
    try {
        hbi = new HBaseTimelineWriterImpl();
        hbi.init(util.getConfiguration());
        hbi.start();
        UserGroupInformation user = UserGroupInformation.createRemoteUser("user1");
        // Generic entities plus app1 under the first application's context.
        TimelineCollectorContext context = new TimelineCollectorContext("cluster1", "user1", "some_flow_name", "AB7822C10F1111", 1002345678919L, appName1);
        hbi.write(context, te, user);
        hbi.write(context, appTe1, user);
        // Same generic entities plus app2 under the second application.
        context = new TimelineCollectorContext("cluster1", "user1", "some_flow_name", "AB7822C10F1111", 1002345678919L, appName2);
        hbi.write(context, te, user);
        hbi.write(context, appTe2, user);
    } finally {
        // stop()/close() in finally covers both success and failure paths;
        // the explicit stop() previously at the end of the try block was
        // redundant (finally always stopped the writer again).
        if (hbi != null) {
            hbi.stop();
            hbi.close();
        }
    }
}
293218.35125hadoop
/**
 * Verifies that HBase scan batch limits are honored on the flow run table.
 *
 * Writes three rows of four non-numeric cells each, then scans with batch
 * limits of 2, 3, 1000 and -2992: a positive batch caps the number of cells
 * per {@link Result}, a batch larger than the row returns whole rows, and a
 * non-positive batch disables batching entirely (all 4 cells per row).
 *
 * Fix over the original: the {@code Connection}, {@code Table} and each
 * {@code ResultScanner} are now closed via try-with-resources instead of
 * being leaked.
 */
public void testWriteScanBatchLimit() throws Exception {
    String rowKey = "nonNumericRowKey";
    String column = "nonNumericColumnName";
    String value = "nonNumericValue";
    String column2 = "nonNumericColumnName2";
    String value2 = "nonNumericValue2";
    String column3 = "nonNumericColumnName3";
    String value3 = "nonNumericValue3";
    String column4 = "nonNumericColumnName4";
    String value4 = "nonNumericValue4";
    byte[] rowKeyBytes = Bytes.toBytes(rowKey);
    byte[] columnNameBytes = Bytes.toBytes(column);
    byte[] valueBytes = Bytes.toBytes(value);
    byte[] columnName2Bytes = Bytes.toBytes(column2);
    byte[] value2Bytes = Bytes.toBytes(value2);
    byte[] columnName3Bytes = Bytes.toBytes(column3);
    byte[] value3Bytes = Bytes.toBytes(value3);
    byte[] columnName4Bytes = Bytes.toBytes(column4);
    byte[] value4Bytes = Bytes.toBytes(value4);
    Configuration hbaseConf = util.getConfiguration();
    // try-with-resources: close the connection and table even on assertion
    // failure (the original leaked both, plus every scanner).
    try (Connection conn = ConnectionFactory.createConnection(hbaseConf);
         Table flowRunTable = conn.getTable(BaseTableRW.getTableName(hbaseConf, FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME))) {
        // Row 1: four cells in the INFO family.
        Put p = new Put(rowKeyBytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes, valueBytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName2Bytes, value2Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName3Bytes, value3Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName4Bytes, value4Bytes);
        flowRunTable.put(p);
        // Row 2: identical four cells under a different row key.
        String rowKey2 = "nonNumericRowKey2";
        byte[] rowKey2Bytes = Bytes.toBytes(rowKey2);
        p = new Put(rowKey2Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes, valueBytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName2Bytes, value2Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName3Bytes, value3Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName4Bytes, value4Bytes);
        flowRunTable.put(p);
        // Row 3: identical four cells under a third row key.
        String rowKey3 = "nonNumericRowKey3";
        byte[] rowKey3Bytes = Bytes.toBytes(rowKey3);
        p = new Put(rowKey3Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes, valueBytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName2Bytes, value2Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName3Bytes, value3Bytes);
        p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName4Bytes, value4Bytes);
        flowRunTable.put(p);
        // Batch of 2: each Result must expose at most 2 cells.
        Scan s = new Scan();
        s.addFamily(FlowRunColumnFamily.INFO.getBytes());
        s.setStartRow(rowKeyBytes);
        int batchLimit = 2;
        s.setBatch(batchLimit);
        try (ResultScanner scanner = flowRunTable.getScanner(s)) {
            for (Result result : scanner) {
                assertNotNull(result);
                assertTrue(!result.isEmpty());
                assertTrue(result.rawCells().length <= batchLimit);
                Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
                assertTrue(values.size() <= batchLimit);
            }
        }
        // Batch of 3: each Result must expose at most 3 cells.
        s = new Scan();
        s.addFamily(FlowRunColumnFamily.INFO.getBytes());
        s.setStartRow(rowKeyBytes);
        batchLimit = 3;
        s.setBatch(batchLimit);
        try (ResultScanner scanner = flowRunTable.getScanner(s)) {
            for (Result result : scanner) {
                assertNotNull(result);
                assertTrue(!result.isEmpty());
                assertTrue(result.rawCells().length <= batchLimit);
                Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
                assertTrue(values.size() <= batchLimit);
            }
        }
        // Batch far larger than any row: whole rows (4 cells) come back and
        // all 3 rows are seen.
        s = new Scan();
        s.addFamily(FlowRunColumnFamily.INFO.getBytes());
        s.setStartRow(rowKeyBytes);
        batchLimit = 1000;
        s.setBatch(batchLimit);
        int rowCount = 0;
        try (ResultScanner scanner = flowRunTable.getScanner(s)) {
            for (Result result : scanner) {
                assertNotNull(result);
                assertTrue(!result.isEmpty());
                assertTrue(result.rawCells().length <= batchLimit);
                Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
                assertTrue(values.size() <= batchLimit);
                assertEquals(4, values.size());
                rowCount++;
            }
        }
        assertEquals(3, rowCount);
        // Negative batch: batching is disabled, so each Result carries the
        // full row of 4 cells.
        s = new Scan();
        s.addFamily(FlowRunColumnFamily.INFO.getBytes());
        s.setStartRow(rowKeyBytes);
        batchLimit = -2992;
        s.setBatch(batchLimit);
        rowCount = 0;
        try (ResultScanner scanner = flowRunTable.getScanner(s)) {
            for (Result result : scanner) {
                assertNotNull(result);
                assertTrue(!result.isEmpty());
                assertEquals(4, result.rawCells().length);
                Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
                assertEquals(4, values.size());
                rowCount++;
            }
        }
        assertEquals(3, rowCount);
    }
}
292832.012113hadoop
/**
 * Exercises event-existence filtering on entity reads.
 *
 * Covers: EQUAL + NOT_EQUAL combined (with and without Field.ALL — events
 * are only returned when fields are retrieved), NOT_EQUAL alone, two EQUAL
 * filters that cannot both match, an OR of filter lists, and two NOT_EQUAL
 * filters. The repeated id/event-count verification loop is factored into
 * {@link #verifyEntityIdsAndEventCount}.
 */
public void testReadEntitiesEventFilters() throws Exception {
    // Has "update_event" AND lacks "end_event" -> only "hello2"; Field.ALL
    // is retrieved so the single matching event comes back.
    TimelineFilterList ef = new TimelineFilterList();
    ef.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    ef.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    Set<TimelineEntity> entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().eventFilters(ef).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(1, entities.size());
    verifyEntityIdsAndEventCount(entities, 1, "hello2");
    // Same filters without Field.ALL -> same entity, but no events returned.
    TimelineFilterList ef1 = new TimelineFilterList();
    ef1.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    ef1.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().eventFilters(ef1).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    verifyEntityIdsAndEventCount(entities, 0, "hello2");
    // Only NOT_EQUAL "end_event" -> "hello" and "hello2" both qualify.
    TimelineFilterList ef2 = new TimelineFilterList();
    ef2.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().eventFilters(ef2).build(), new TimelineDataToRetrieve());
    assertEquals(2, entities.size());
    verifyEntityIdsAndEventCount(entities, 0, "hello", "hello2");
    // Requires both "update_event" and a non-existent "dummy_event" -> none.
    TimelineFilterList ef3 = new TimelineFilterList();
    ef3.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    ef3.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "dummy_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().eventFilters(ef3).build(), new TimelineDataToRetrieve());
    assertEquals(0, entities.size());
    // OR of an unsatisfiable list and {"start_event"} -> only "hello".
    TimelineFilterList list1 = new TimelineFilterList();
    list1.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "update_event"));
    list1.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "dummy_event"));
    TimelineFilterList list2 = new TimelineFilterList();
    list2.addFilter(new TimelineExistsFilter(TimelineCompareOp.EQUAL, "start_event"));
    TimelineFilterList ef4 = new TimelineFilterList(Operator.OR, list1, list2);
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().eventFilters(ef4).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    verifyEntityIdsAndEventCount(entities, 0, "hello");
    // Lacks both "update_event" and "end_event" -> only "hello".
    TimelineFilterList ef5 = new TimelineFilterList();
    ef5.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "update_event"));
    ef5.addFilter(new TimelineExistsFilter(TimelineCompareOp.NOT_EQUAL, "end_event"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, "application_1231111111_1111", "world", null), new TimelineEntityFilters.Builder().eventFilters(ef5).build(), new TimelineDataToRetrieve());
    assertEquals(1, entities.size());
    verifyEntityIdsAndEventCount(entities, 0, "hello");
}

/**
 * Asserts that every returned entity id is one of {@code expectedIds} and
 * that the total number of events across all entities matches
 * {@code expectedEventCnt}.
 */
private static void verifyEntityIdsAndEventCount(Set<TimelineEntity> entities, int expectedEventCnt, String... expectedIds) {
    int eventCnt = 0;
    for (TimelineEntity timelineEntity : entities) {
        eventCnt += timelineEntity.getEvents().size();
        boolean matched = false;
        for (String expectedId : expectedIds) {
            if (timelineEntity.getId().equals(expectedId)) {
                matched = true;
                break;
            }
        }
        if (!matched) {
            Assert.fail("Unexpected entity id " + timelineEntity.getId());
        }
    }
    assertEquals(expectedEventCnt, eventCnt);
}
292701.3512112kafka
/**
 * Verifies UpdateMetadataRequest serialization round-trips across every
 * protocol version, after normalizing fields that older versions cannot
 * carry (rack, listener name, offline replicas, broker epoch, topic ids).
 */
public void testVersionLogic() {
    String topic0 = "topic0";
    String topic1 = "topic1";
    for (short version : UPDATE_METADATA.allVersions()) {
        // Three partitions across two topics; topic0-partition0 has one
        // offline replica so the version<4 normalization below is exercised.
        List<UpdateMetadataPartitionState> partitionStates = asList(new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(0).setControllerEpoch(2).setLeader(0).setLeaderEpoch(10).setIsr(asList(0, 1)).setZkVersion(10).setReplicas(asList(0, 1, 2)).setOfflineReplicas(asList(2)), new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(1).setControllerEpoch(2).setLeader(1).setLeaderEpoch(11).setIsr(asList(1, 2, 3)).setZkVersion(11).setReplicas(asList(1, 2, 3)).setOfflineReplicas(emptyList()), new UpdateMetadataPartitionState().setTopicName(topic1).setPartitionIndex(0).setControllerEpoch(2).setLeader(2).setLeaderEpoch(11).setIsr(asList(2, 3)).setZkVersion(11).setReplicas(asList(2, 3, 4)).setOfflineReplicas(emptyList()));
        List<UpdateMetadataEndpoint> broker0Endpoints = new ArrayList<>();
        broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id));
        // Multiple endpoints per broker only exist from version 1 onwards.
        if (version >= 1) {
            broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9091).setSecurityProtocol(SecurityProtocol.SSL.id));
        }
        // Explicit listener names only exist from version 3 onwards
        // (version >= 3 implies both endpoints above were added).
        if (version >= 3) {
            broker0Endpoints.get(0).setListener("listener0");
            broker0Endpoints.get(1).setListener("listener1");
        }
        List<UpdateMetadataBroker> liveBrokers = asList(new UpdateMetadataBroker().setId(0).setRack("rack0").setEndpoints(broker0Endpoints), new UpdateMetadataBroker().setId(1).setEndpoints(asList(new UpdateMetadataEndpoint().setHost("host1").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setListener("PLAINTEXT"))));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put(topic0, Uuid.randomUuid());
        topicIds.put(topic1, Uuid.randomUuid());
        UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(version, 1, 2, 3, partitionStates, liveBrokers, topicIds).build();
        // Sanity-check the request before serialization; partition order is
        // not guaranteed, hence the set comparison.
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveBrokers, request.liveBrokers());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        UpdateMetadataRequest deserializedRequest = new UpdateMetadataRequest(new UpdateMetadataRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Normalize the expected objects to what older wire formats can
        // actually carry, then compare against the deserialized request.
        if (version < 2) {
            // Rack is not serialized before v2.
            for (UpdateMetadataBroker liveBroker : liveBrokers) liveBroker.setRack("");
        }
        if (version < 3) {
            // Listener names are derived from the security protocol pre-v3.
            for (UpdateMetadataBroker liveBroker : liveBrokers) {
                for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
                    SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
                    endpoint.setListener(ListenerName.forSecurityProtocol(securityProtocol).value());
                }
            }
        }
        // Offline replicas are not serialized before v4.
        if (version < 4)
            partitionStates.get(0).setOfflineReplicas(emptyList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(liveBrokers, deserializedRequest.liveBrokers());
        assertEquals(1, deserializedRequest.controllerId());
        assertEquals(2, deserializedRequest.controllerEpoch());
        // Broker epoch defaults to -1 on the wire before v5.
        if (version >= 5)
            assertEquals(3, deserializedRequest.brokerEpoch());
        else
            assertEquals(-1, deserializedRequest.brokerEpoch());
        // Topic ids round-trip only from v7; earlier versions yield ZERO_UUID.
        long topicIdCount = deserializedRequest.data().topicStates().stream().map(UpdateMetadataRequestData.UpdateMetadataTopicState::topicId).filter(topicId -> !Uuid.ZERO_UUID.equals(topicId)).count();
        if (version >= 7)
            assertEquals(2, topicIdCount);
        else
            assertEquals(0, topicIdCount);
    }
}
294780.634107kafka
 /**
  * Verifies the per-topic and aggregate remote-log yammer metrics
  * (RemoteCopyLagBytes, RemoteCopyLagSegments, RemoteLogSizeComputationTime)
  * as segments are copied to remote storage.
  *
  * Mocks a log with two copyable segments plus the active segment, gates the
  * second copy and the second size computation on latches so the lag/time
  * metrics can be observed mid-flight, then releases the latches.
  */
 void testRemoteLogManagerRemoteMetrics() throws Exception {
    long oldestSegmentStartOffset = 0L;
    long olderSegmentStartOffset = 75L;
    long nextSegmentStartOffset = 150L;
    when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition());
    checkpoint.write(totalEpochEntries);
    LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint);
    when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache));
    when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L));
    File tempFile = TestUtils.tempFile();
    File mockProducerSnapshotIndex = TestUtils.tempFile();
    File tempDir = TestUtils.tempDirectory();
    // Three segments: two eligible for upload plus the active segment.
    LogSegment oldestSegment = mock(LogSegment.class);
    LogSegment olderSegment = mock(LogSegment.class);
    LogSegment activeSegment = mock(LogSegment.class);
    when(oldestSegment.baseOffset()).thenReturn(oldestSegmentStartOffset);
    when(olderSegment.baseOffset()).thenReturn(olderSegmentStartOffset);
    when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset);
    FileRecords oldestFileRecords = mock(FileRecords.class);
    when(oldestSegment.log()).thenReturn(oldestFileRecords);
    when(oldestFileRecords.file()).thenReturn(tempFile);
    when(oldestFileRecords.sizeInBytes()).thenReturn(10);
    when(oldestSegment.readNextOffset()).thenReturn(olderSegmentStartOffset);
    FileRecords olderFileRecords = mock(FileRecords.class);
    when(olderSegment.log()).thenReturn(olderFileRecords);
    when(olderFileRecords.file()).thenReturn(tempFile);
    when(olderFileRecords.sizeInBytes()).thenReturn(10);
    when(olderSegment.readNextOffset()).thenReturn(nextSegmentStartOffset);
    when(mockLog.activeSegment()).thenReturn(activeSegment);
    when(mockLog.logStartOffset()).thenReturn(oldestSegmentStartOffset);
    when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(JavaConverters.collectionAsScalaIterable(Arrays.asList(oldestSegment, olderSegment, activeSegment)));
    ProducerStateManager mockStateManager = mock(ProducerStateManager.class);
    when(mockLog.producerStateManager()).thenReturn(mockStateManager);
    when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex));
    when(mockLog.lastStableOffset()).thenReturn(250L);
    Map<String, Long> logProps = new HashMap<>();
    logProps.put("retention.bytes", 1000000L);
    logProps.put("retention.ms", -1L);
    LogConfig logConfig = new LogConfig(logProps);
    when(mockLog.config()).thenReturn(logConfig);
    // Index files (offset/time/txn) for the oldest segment.
    OffsetIndex oldestIdx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldestSegmentStartOffset, ""), oldestSegmentStartOffset, 1000).get();
    TimeIndex oldestTimeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldestSegmentStartOffset, ""), oldestSegmentStartOffset, 1500).get();
    File oldestTxnFile = UnifiedLog.transactionIndexFile(tempDir, oldestSegmentStartOffset, "");
    oldestTxnFile.createNewFile();
    TransactionIndex oldestTxnIndex = new TransactionIndex(oldestSegmentStartOffset, oldestTxnFile);
    when(oldestSegment.timeIndex()).thenReturn(oldestTimeIdx);
    when(oldestSegment.offsetIndex()).thenReturn(oldestIdx);
    when(oldestSegment.txnIndex()).thenReturn(oldestTxnIndex);
    // Index files for the older (second) segment.
    OffsetIndex olderIdx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, olderSegmentStartOffset, ""), olderSegmentStartOffset, 1000).get();
    TimeIndex olderTimeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, olderSegmentStartOffset, ""), olderSegmentStartOffset, 1500).get();
    File olderTxnFile = UnifiedLog.transactionIndexFile(tempDir, olderSegmentStartOffset, "");
    // Bug fix: the original called oldestTxnFile.createNewFile() here
    // (copy-paste), leaving olderTxnFile non-existent on disk.
    olderTxnFile.createNewFile();
    TransactionIndex olderTxnIndex = new TransactionIndex(olderSegmentStartOffset, olderTxnFile);
    when(olderSegment.timeIndex()).thenReturn(olderTimeIdx);
    when(olderSegment.offsetIndex()).thenReturn(olderIdx);
    when(olderSegment.txnIndex()).thenReturn(olderTxnIndex);
    CompletableFuture<Void> dummyFuture = new CompletableFuture<>();
    dummyFuture.complete(null);
    when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture);
    when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture);
    Iterator<RemoteLogSegmentMetadata> iterator = listRemoteLogSegmentMetadata(leaderTopicIdPartition, 5, 100, 1024, RemoteLogSegmentState.COPY_SEGMENT_FINISHED).iterator();
    when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)).thenReturn(iterator);
    when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 2)).thenReturn(iterator);
    when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 1)).thenReturn(iterator);
    // First size computation advances mock time by 1s (observable via the
    // RemoteLogSizeComputationTime metric); the second blocks on a latch.
    CountDownLatch remoteLogSizeComputationTimeLatch = new CountDownLatch(1);
    when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)).thenAnswer(ans -> {
        time.sleep(1000);
        return iterator;
    }).thenAnswer(ans -> {
        remoteLogSizeComputationTimeLatch.await(5000, TimeUnit.MILLISECONDS);
        return Collections.emptyIterator();
    });
    // First segment copy completes immediately; the second blocks so the
    // copy-lag metrics can be asserted while one segment is still pending.
    CountDownLatch latch = new CountDownLatch(1);
    doAnswer(ans -> Optional.empty()).doAnswer(ans -> {
        latch.await(5000, TimeUnit.MILLISECONDS);
        return Optional.empty();
    }).when(remoteStorageManager).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class));
    Partition mockLeaderPartition = mockPartition(leaderTopicIdPartition);
    when(mockLog.onlyLocalLogSegmentsSize()).thenReturn(175L, 100L);
    when(activeSegment.size()).thenReturn(100);
    when(mockLog.onlyLocalLogSegmentsCount()).thenReturn(1L);
    // Before leadership change, per-topic gauges do not exist and the
    // aggregate gauges read zero.
    assertThrows(NoSuchElementException.class, () -> yammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic));
    assertThrows(NoSuchElementException.class, () -> yammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic));
    assertThrows(NoSuchElementException.class, () -> yammerMetricValue("RemoteLogSizeComputationTime,topic=" + leaderTopic));
    assertEquals(0L, yammerMetricValue("RemoteCopyLagBytes"));
    assertEquals(0L, yammerMetricValue("RemoteCopyLagSegments"));
    assertEquals(0L, yammerMetricValue("RemoteLogSizeComputationTime"));
    remoteLogManager.onLeadershipChange(Collections.singleton(mockLeaderPartition), Collections.emptySet(), topicIds);
    // With one copy still blocked: 175 local - 100 active = 75 lag bytes,
    // and one segment remaining to copy.
    TestUtils.waitForCondition(() -> 75 == safeLongYammerMetricValue("RemoteCopyLagBytes") && 75 == safeLongYammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic), String.format("Expected to find 75 for RemoteCopyLagBytes metric value, but found %d for topic 'Leader' and %d for all topics.", safeLongYammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic), safeLongYammerMetricValue("RemoteCopyLagBytes")));
    TestUtils.waitForCondition(() -> 1 == safeLongYammerMetricValue("RemoteCopyLagSegments") && 1 == safeLongYammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic), String.format("Expected to find 1 for RemoteCopyLagSegments metric value, but found %d for topic 'Leader' and %d for all topics.", safeLongYammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic), safeLongYammerMetricValue("RemoteCopyLagSegments")));
    // Unblock the second copy, then verify the 1s size-computation time.
    latch.countDown();
    TestUtils.waitForCondition(() -> safeLongYammerMetricValue("RemoteLogSizeComputationTime") >= 1000 && safeLongYammerMetricValue("RemoteLogSizeComputationTime,topic=" + leaderTopic) >= 1000, String.format("Expected to find 1000 for RemoteLogSizeComputationTime metric value, but found %d for topic 'Leader' and %d for all topics.", safeLongYammerMetricValue("RemoteLogSizeComputationTime,topic=" + leaderTopic), safeLongYammerMetricValue("RemoteLogSizeComputationTime")));
    remoteLogSizeComputationTimeLatch.countDown();
}
293249.391126kafka
/**
 * Verifies that a brand-new member joining a stable consumer group bumps the
 * group epoch (10 -> 11), triggers a fresh target assignment for all three
 * members, and produces the expected coordinator records in order:
 * subscription + metadata + epoch, then the three (unordered) target
 * assignments, then the target-assignment epoch and the new member's current
 * assignment.
 */
public void testNewJoiningMemberTriggersNewTargetAssignment() {
    String groupId = "fooup";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    String memberId3 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Stable group at epoch 10: member1 owns foo[0-2]/bar[0-1], member2 owns
    // foo[3-5]/bar[2]; assignment epoch matches the group epoch.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 6).addTopic(barTopicId, barTopicName, 3).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withMember(new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).build()).withMember(new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build()).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).withAssignmentEpoch(10)).build();
    // Prepared target assignment once member3 joins: partitions are spread
    // across all three members.
    assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

        {
            put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(barTopicId, 0))));
            put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 2, 3), mkTopicAssignment(barTopicId, 1))));
            put(memberId3, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 4, 5), mkTopicAssignment(barTopicId, 2))));
        }
    }));
    // member3 joins with epoch 0 (new member) and no owned partitions.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setMemberEpoch(0).setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignor("range").setTopicPartitions(Collections.emptyList()));
    // Response carries the bumped epoch 11 and an empty assignment — member3
    // must wait for partitions still owned by the existing members.
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId3).setMemberEpoch(11).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()), result.response());
    ConsumerGroupMember expectedMember3 = new ConsumerGroupMember.Builder(memberId3).setState(MemberState.UNRELEASED_PARTITIONS).setMemberEpoch(11).setPreviousMemberEpoch(0).setClientId("client").setClientHost("localhost/127.0.0.1").setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").build();
    List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember3), CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

        {
            put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
            put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
        }
    }), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 11), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(barTopicId, 0))), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 2, 3), mkTopicAssignment(barTopicId, 1))), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId3, mkAssignment(mkTopicAssignment(fooTopicId, 4, 5), mkTopicAssignment(barTopicId, 2))), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 11), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember3));
    // Records 0-2 are ordered; the three per-member target-assignment
    // records (3-5) may appear in any order; 6-7 are ordered again.
    assertRecordsEquals(expectedRecords.subList(0, 3), result.records().subList(0, 3));
    assertUnorderedListEquals(expectedRecords.subList(3, 6), result.records().subList(3, 6));
    assertRecordsEquals(expectedRecords.subList(6, 8), result.records().subList(6, 8));
}
293309.641128kafka
public void testGroupEpochBumpWhenNewStaticMemberJoins() {
    // Verifies that a brand-new static member joining an established consumer group
    // (at epoch 10) bumps the group epoch to 11 and triggers a full recomputation of
    // the target assignment for all three members.
    String groupId = "fooup";
    // Each static member uses its member id as its instance id.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    String memberId3 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Existing group: two stable static members owning all partitions of foo (6) and bar (3).
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 6).addTopic(barTopicId, barTopicName, 3).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withMember(new ConsumerGroupMember.Builder(memberId1).setState(MemberState.STABLE).setInstanceId(memberId1).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).build()).withMember(new ConsumerGroupMember.Builder(memberId2).setState(MemberState.STABLE).setInstanceId(memberId2).setMemberEpoch(10).setPreviousMemberEpoch(9).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList("foo", "bar", "zar")).setServerAssignorName("range").setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).build()).withAssignment(memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2), mkTopicAssignment(barTopicId, 0, 1))).withAssignment(memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 2))).withAssignmentEpoch(10)).build();
    // Assignment the assignor returns once the third member has joined. A plain HashMap
    // replaces double-brace initialization, which would create an anonymous inner class
    // capturing the enclosing test instance.
    Map<String, MemberAssignment> preparedAssignment = new HashMap<>();
    preparedAssignment.put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(barTopicId, 0))));
    preparedAssignment.put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 2, 3), mkTopicAssignment(barTopicId, 1))));
    preparedAssignment.put(memberId3, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 4, 5), mkTopicAssignment(barTopicId, 2))));
    assignor.prepareGroupAssignment(new GroupAssignment(preparedAssignment));
    // The new static member joins with member epoch 0 (fresh member).
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId3).setInstanceId(memberId3).setMemberEpoch(0).setRebalanceTimeoutMs(5000).setServerAssignor("range").setSubscribedTopicNames(Arrays.asList("foo", "bar")).setTopicPartitions(Collections.emptyList()));
    // Response carries the bumped epoch but an empty assignment (member is UNRELEASED_PARTITIONS).
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId3).setMemberEpoch(11).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()), result.response());
    ConsumerGroupMember expectedMember3 = new ConsumerGroupMember.Builder(memberId3).setMemberEpoch(11).setState(MemberState.UNRELEASED_PARTITIONS).setInstanceId(memberId3).setPreviousMemberEpoch(0).setClientId("client").setClientHost("localhost/127.0.0.1").setRebalanceTimeoutMs(5000).setSubscribedTopicNames(Arrays.asList("foo", "bar")).setServerAssignorName("range").build();
    // Expected refreshed subscription metadata for both subscribed topics.
    Map<String, TopicMetadata> expectedSubscriptionMetadata = new HashMap<>();
    expectedSubscriptionMetadata.put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
    expectedSubscriptionMetadata.put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
    List<CoordinatorRecord> expectedRecords = Arrays.asList(
        CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember3),
        CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, expectedSubscriptionMetadata),
        CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 11),
        CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId1, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1), mkTopicAssignment(barTopicId, 0))),
        CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId2, mkAssignment(mkTopicAssignment(fooTopicId, 2, 3), mkTopicAssignment(barTopicId, 1))),
        CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId3, mkAssignment(mkTopicAssignment(fooTopicId, 4, 5), mkTopicAssignment(barTopicId, 2))),
        CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 11),
        CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember3));
    // Records 0-2 (member subscription, subscription metadata, group epoch) are ordered;
    // the three per-member target assignment records (3-5) may appear in any order;
    // records 6-7 (target assignment epoch, current assignment) are ordered again.
    assertRecordsEquals(expectedRecords.subList(0, 3), result.records().subList(0, 3));
    assertUnorderedListEquals(expectedRecords.subList(3, 6), result.records().subList(3, 6));
    assertRecordsEquals(expectedRecords.subList(6, 8), result.records().subList(6, 8));
}
293903.881114kafka
public void testListGroups() {
    // Verifies ListGroups filtering by group state and by group type, for a mix of one
    // classic group and one consumer group.
    String consumerGroupId = "consumer-group-id";
    String classicGroupId = "classic-group-id";
    String memberId1 = Uuid.randomUuid().toString();
    String fooTopicName = "foo";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withConsumerGroup(new ConsumerGroupBuilder(consumerGroupId, 10)).build();
    // Create an empty classic group with protocol type "classic" and commit it.
    context.replay(GroupMetadataManagerTestContext.newGroupMetadataRecord(classicGroupId, new GroupMetadataValue().setMembers(Collections.emptyList()).setGeneration(2).setLeader(null).setProtocolType("classic").setProtocol("range").setCurrentStateTimestamp(context.time.milliseconds()), MetadataVersion.latestTesting()));
    context.commit();
    ClassicGroup classicGroup = context.groupMetadataManager.getOrMaybeCreateClassicGroup(classicGroupId, false);
    // Replay (but do not commit yet) a member subscription and an epoch bump for the
    // consumer group; until committed, ListGroups still sees it as EMPTY.
    context.replay(CoordinatorRecordHelpers.newMemberSubscriptionRecord(consumerGroupId, new ConsumerGroupMember.Builder(memberId1).setSubscribedTopicNames(Collections.singletonList(fooTopicName)).build()));
    context.replay(CoordinatorRecordHelpers.newGroupEpochRecord(consumerGroupId, 11));
    // No filters: both groups are returned.
    Map<String, ListGroupsResponseData.ListedGroup> actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.emptyList()).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    Map<String, ListGroupsResponseData.ListedGroup> expectAllGroupMap = Stream.of(new ListGroupsResponseData.ListedGroup().setGroupId(classicGroup.groupId()).setProtocolType("classic").setGroupState(EMPTY.toString()).setGroupType(Group.GroupType.CLASSIC.toString()), new ListGroupsResponseData.ListedGroup().setGroupId(consumerGroupId).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setGroupState(ConsumerGroup.ConsumerGroupState.EMPTY.toString()).setGroupType(Group.GroupType.CONSUMER.toString())).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    // State filter "empty": both groups still match (consumer group epoch not committed).
    actualAllGroupMap = context.sendListGroups(Collections.singletonList("empty"), Collections.emptyList()).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    // After commit the consumer group becomes ASSIGNING.
    context.commit();
    actualAllGroupMap = context.sendListGroups(Collections.singletonList("assigning"), Collections.emptyList()).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    expectAllGroupMap = Stream.of(new ListGroupsResponseData.ListedGroup().setGroupId(consumerGroupId).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setGroupState(ConsumerGroup.ConsumerGroupState.ASSIGNING.toString()).setGroupType(Group.GroupType.CONSUMER.toString())).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    // State filter is case-insensitive: "Empty" matches the classic group.
    actualAllGroupMap = context.sendListGroups(Collections.singletonList("Empty"), Collections.emptyList()).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    expectAllGroupMap = Stream.of(new ListGroupsResponseData.ListedGroup().setGroupId(classicGroup.groupId()).setProtocolType("classic").setGroupState(EMPTY.toString()).setGroupType(Group.GroupType.CLASSIC.toString())).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    // Type filter: CLASSIC only.
    actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList(Group.GroupType.CLASSIC.toString())).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    expectAllGroupMap = Stream.of(new ListGroupsResponseData.ListedGroup().setGroupId(classicGroup.groupId()).setProtocolType("classic").setGroupState(EMPTY.toString()).setGroupType(Group.GroupType.CLASSIC.toString())).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    // Type filter is case-insensitive: "Consumer" matches the consumer group.
    actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Consumer")).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    expectAllGroupMap = Stream.of(new ListGroupsResponseData.ListedGroup().setGroupId(consumerGroupId).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setGroupState(ConsumerGroup.ConsumerGroupState.ASSIGNING.toString()).setGroupType(Group.GroupType.CONSUMER.toString())).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    // Multiple state filters are OR-ed together.
    // NOTE: the classic group's protocol type is "classic" (as replayed above); the
    // previous expectation built it from Group.GroupType.CLASSIC.toString(), conflating
    // group type with protocol type.
    actualAllGroupMap = context.sendListGroups(Arrays.asList("empty", "Assigning"), Collections.emptyList()).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    expectAllGroupMap = Stream.of(new ListGroupsResponseData.ListedGroup().setGroupId(classicGroup.groupId()).setProtocolType("classic").setGroupState(EMPTY.toString()).setGroupType(Group.GroupType.CLASSIC.toString()), new ListGroupsResponseData.ListedGroup().setGroupId(consumerGroupId).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setGroupState(ConsumerGroup.ConsumerGroupState.ASSIGNING.toString()).setGroupType(Group.GroupType.CONSUMER.toString())).collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    // Unknown type or state filters match nothing.
    actualAllGroupMap = context.sendListGroups(Collections.emptyList(), Collections.singletonList("Invalid")).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    expectAllGroupMap = Collections.emptyMap();
    assertEquals(expectAllGroupMap, actualAllGroupMap);
    actualAllGroupMap = context.sendListGroups(Collections.singletonList("Invalid"), Collections.emptyList()).stream().collect(Collectors.toMap(ListGroupsResponseData.ListedGroup::groupId, Function.identity()));
    expectAllGroupMap = Collections.emptyMap();
    assertEquals(expectAllGroupMap, actualAllGroupMap);
}
293482.172122kafka
public void testJoiningConsumerGroupWithNewDynamicMember() throws Exception {
    // Verifies that a classic-protocol client joining an existing consumer group as a
    // new dynamic member first receives MEMBER_ID_REQUIRED with a generated member id,
    // then joins successfully on the second attempt, bumping the group epoch to 11.
    // The whole flow is exercised for every supported ConsumerProtocolSubscription version.
    String groupId = "group-id";
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";
    for (short version = ConsumerProtocolSubscription.LOWEST_SUPPORTED_VERSION; version <= ConsumerProtocolSubscription.HIGHEST_SUPPORTED_VERSION; version++) {
        String memberId = Uuid.randomUuid().toString();
        MockPartitionAssignor assignor = new MockPartitionAssignor("range");
        // Existing group at epoch 10 with one stable member owning both foo partitions.
        GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 2).addTopic(barTopicId, barTopicName, 1).addRacks().build()).withConsumerGroup(new ConsumerGroupBuilder(groupId, 10).withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {

            {
                put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
            }
        }).withMember(new ConsumerGroupMember.Builder(memberId).setState(MemberState.STABLE).setMemberEpoch(10).setPreviousMemberEpoch(10).setAssignedPartitions(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1))).build()).withAssignment(memberId, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1))).withAssignmentEpoch(10)).build();
        // First join uses UNKNOWN_MEMBER_ID: the coordinator must reply with
        // MEMBER_ID_REQUIRED and a server-generated member id, writing no records.
        JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId(groupId).withMemberId(UNKNOWN_MEMBER_ID).withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(Arrays.asList(fooTopicName, barTopicName), Collections.emptyList(), version)).build();
        GroupMetadataManagerTestContext.JoinResult firstJoinResult = context.sendClassicGroupJoin(request, true);
        assertTrue(firstJoinResult.records.isEmpty());
        firstJoinResult.appendFuture.complete(null);
        assertTrue(firstJoinResult.joinFuture.isDone());
        assertEquals(Errors.MEMBER_ID_REQUIRED.code(), firstJoinResult.joinFuture.get().errorCode());
        String newMemberId = firstJoinResult.joinFuture.get().memberId();
        assertNotEquals("", newMemberId);
        // Assignment the assignor will hand out once the new member is in the group.
        assignor.prepareGroupAssignment(new GroupAssignment(new HashMap<String, MemberAssignment>() {

            {
                put(memberId, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0))));
                put(newMemberId, new MemberAssignment(mkAssignment(mkTopicAssignment(barTopicId, 0))));
            }
        }));
        // Second join retries with the id returned by the first attempt.
        JoinGroupRequestData secondRequest = new JoinGroupRequestData().setGroupId(request.groupId()).setMemberId(newMemberId).setProtocolType(request.protocolType()).setProtocols(request.protocols()).setSessionTimeoutMs(request.sessionTimeoutMs()).setRebalanceTimeoutMs(request.rebalanceTimeoutMs()).setReason(request.reason());
        GroupMetadataManagerTestContext.JoinResult secondJoinResult = context.sendClassicGroupJoin(secondRequest, true);
        // The new member is STABLE at epoch 11 and carries classic member metadata
        // derived from the join request's protocols.
        ConsumerGroupMember expectedMember = new ConsumerGroupMember.Builder(newMemberId).setMemberEpoch(11).setPreviousMemberEpoch(0).setState(MemberState.STABLE).setClientId("client").setClientHost("localhost/127.0.0.1").setSubscribedTopicNames(Arrays.asList(fooTopicName, barTopicName)).setRebalanceTimeoutMs(500).setAssignedPartitions(assignor.targetPartitions(newMemberId)).setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata().setSessionTimeoutMs(request.sessionTimeoutMs()).setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(request.protocols()))).build();
        List<CoordinatorRecord> expectedRecords = Arrays.asList(CoordinatorRecordHelpers.newMemberSubscriptionRecord(groupId, expectedMember), CoordinatorRecordHelpers.newGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {

            {
                put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
                put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 1, mkMapOfPartitionRacks(1)));
            }
        }), CoordinatorRecordHelpers.newGroupEpochRecord(groupId, 11), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, memberId, assignor.targetPartitions(memberId)), CoordinatorRecordHelpers.newTargetAssignmentRecord(groupId, newMemberId, assignor.targetPartitions(newMemberId)), CoordinatorRecordHelpers.newTargetAssignmentEpochRecord(groupId, 11), CoordinatorRecordHelpers.newCurrentAssignmentRecord(groupId, expectedMember));
        // Records 0-2 are ordered; the two per-member target assignment records (3-4)
        // may appear in any order; records 5-6 are ordered again.
        assertRecordsEquals(expectedRecords.subList(0, 3), secondJoinResult.records.subList(0, 3));
        assertUnorderedListEquals(expectedRecords.subList(3, 5), secondJoinResult.records.subList(3, 5));
        assertRecordsEquals(expectedRecords.subList(5, 7), secondJoinResult.records.subList(5, 7));
        secondJoinResult.appendFuture.complete(null);
        assertTrue(secondJoinResult.joinFuture.isDone());
        assertEquals(new JoinGroupResponseData().setMemberId(newMemberId).setGenerationId(11).setProtocolType(ConsumerProtocol.PROTOCOL_TYPE).setProtocolName("range"), secondJoinResult.joinFuture.get());
        // The coordinator must have scheduled session and sync timeouts for the new member.
        context.assertSessionTimeout(groupId, newMemberId, request.sessionTimeoutMs());
        context.assertSyncTimeout(groupId, newMemberId, request.rebalanceTimeoutMs());
    }
}
293944.683120spring-framework
 void actualPathMatching(SimpleUrlHandlerMapping mapping, WebApplicationContext wac) throws Exception {
    // "mainController" backs the mapping's explicit URL patterns; "starController" is
    // the catch-all handler that requests fall back to when no pattern matches.
    Object bean = wac.getBean("mainController");
    Object defaultBean = wac.getBean("starController");

    // Matches that also verify the path-within-handler-mapping request attribute.
    MockHttpServletRequest request = assertPathMapsTo(mapping, "/pathmatchingTest.html", bean);
    assertThat(request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE)).isEqualTo("/pathmatchingTest.html");
    request = assertPathMapsTo(mapping, "welcome.html", defaultBean);
    assertThat(request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE)).isEqualTo("welcome.html");
    request = assertPathMapsTo(mapping, "/pathmatchingAA.html", bean);
    assertThat(request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE)).isEqualTo("pathmatchingAA.html");
    request = assertPathMapsTo(mapping, "/pathmatchingA.html", defaultBean);
    assertThat(request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE)).isEqualTo("/pathmatchingA.html");

    // Prefix, suffix and wildcard pattern matching.
    assertPathMapsTo(mapping, "/administrator/pathmatching.html", bean);
    assertPathMapsTo(mapping, "/administrator/test/pathmatching.html", bean);
    assertPathMapsTo(mapping, "/administratort/pathmatching.html", defaultBean);
    assertPathMapsTo(mapping, "/bla.jsp", bean);
    assertPathMapsTo(mapping, "/administrator/another/bla.xml", bean);
    assertPathMapsTo(mapping, "/administrator/another/bla.gif", defaultBean);
    assertPathMapsTo(mapping, "/administrator/test/testlastbit", bean);
    assertPathMapsTo(mapping, "/administrator/test/testla", defaultBean);
    // These two paths only match when the mapping uses the PathPattern parser.
    if (mapping.getPatternParser() != null) {
        assertPathMapsTo(mapping, "/administrator/testing/longer/bla", bean);
        assertPathMapsTo(mapping, "/administrator/testing/longer/test.jsp", bean);
    }
    assertPathMapsTo(mapping, "/administrator/testing/longer2/notmatching/notmatching", defaultBean);
    assertPathMapsTo(mapping, "/shortpattern/testing/toolong", defaultBean);
    assertPathMapsTo(mapping, "/XXpathXXmatching.html", bean);
    assertPathMapsTo(mapping, "/pathXXmatching.html", bean);
    assertPathMapsTo(mapping, "/XpathXXmatching.html", defaultBean);
    assertPathMapsTo(mapping, "/XXpathmatching.html", defaultBean);
    assertPathMapsTo(mapping, "/show12.html", bean);
    assertPathMapsTo(mapping, "/show123.html", bean);
    assertPathMapsTo(mapping, "/show1.html", bean);
    assertPathMapsTo(mapping, "/reallyGood-test-is-this.jpeg", bean);
    assertPathMapsTo(mapping, "/reallyGood-tst-is-this.jpeg", defaultBean);
    assertPathMapsTo(mapping, "/testing/test.jpeg", bean);
    assertPathMapsTo(mapping, "/testing/test.jpg", defaultBean);
    assertPathMapsTo(mapping, "/anotherTest", bean);
    assertPathMapsTo(mapping, "/stillAnotherTest", defaultBean);
    assertPathMapsTo(mapping, "/outofpattern*ye", defaultBean);
    assertPathMapsTo(mapping, "/test't%20est/path'm%20atching.html", defaultBean);

    // Encoded-path behavior differs between the two matching strategies, so this last
    // case cannot use the helper's single expectation.
    request = new MockHttpServletRequest("GET", "/test%26t%20est/path%26m%20atching.html");
    HandlerExecutionChain chain = getHandler(mapping, request);
    if (!mapping.getPathPatternHandlerMap().isEmpty()) {
        assertThat(chain.getHandler()).as("PathPattern always matches to encoded paths.").isSameAs(bean);
    } else {
        assertThat(chain.getHandler()).as("PathMatcher should not match encoded pattern with urlDecode=true").isSameAs(defaultBean);
    }
}

 /**
  * Issues a GET request for the given path against the mapping, asserts that it resolves
  * to the expected handler, and returns the request for further attribute assertions.
  */
 private MockHttpServletRequest assertPathMapsTo(SimpleUrlHandlerMapping mapping, String path, Object expectedHandler) throws Exception {
    MockHttpServletRequest request = new MockHttpServletRequest("GET", path);
    HandlerExecutionChain chain = getHandler(mapping, request);
    assertThat(chain.getHandler()).isSameAs(expectedHandler);
    return request;
}
291184.0831111wildfly
/**
 * Parses one child element of a cache configuration, dispatching to the element-specific
 * parser and enforcing the schema versions in which each element is valid. Throws
 * {@code XMLStreamException} (via {@link ParseUtils#unexpectedElement}) for elements that
 * are unknown or not valid in the document's schema version.
 */
private void parseCacheElement(XMLExtendedStreamReader reader, PathAddress cacheAddress, Map<PathAddress, ModelNode> operations) throws XMLStreamException {
    XMLElement element = XMLElement.forName(reader.getLocalName());
    switch(element) {
        case EVICTION:
            {
                // <eviction/> was removed in schema 5.0 (superseded by the memory elements).
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_5_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseEviction(reader, cacheAddress, operations);
                break;
            }
        case EXPIRATION:
            {
                this.parseExpiration(reader, cacheAddress, operations);
                break;
            }
        case LOCKING:
            {
                this.parseLocking(reader, cacheAddress, operations);
                break;
            }
        case TRANSACTION:
            {
                this.parseTransaction(reader, cacheAddress, operations);
                break;
            }
        case STORE:
            {
                this.parseCustomStore(reader, cacheAddress, operations);
                break;
            }
        case FILE_STORE:
            {
                this.parseFileStore(reader, cacheAddress, operations);
                break;
            }
        case REMOTE_STORE:
            {
                this.parseRemoteStore(reader, cacheAddress, operations);
                break;
            }
        case HOTROD_STORE:
            {
                // <hotrod-store/> exists only since schema 6.0. Previously this case fell
                // through into JDBC_STORE when the version check failed, silently parsing
                // a hotrod-store element as a JDBC store on 5.x documents; reject it
                // explicitly instead.
                if (!this.schema.since(InfinispanSubsystemSchema.VERSION_6_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseHotRodStore(reader, cacheAddress, operations);
                break;
            }
        case JDBC_STORE:
            {
                // Generic <jdbc-store/> exists only since schema 5.0.
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_5_0)) {
                    this.parseJDBCStore(reader, cacheAddress, operations);
                } else {
                    throw ParseUtils.unexpectedElement(reader);
                }
                break;
            }
        case STRING_KEYED_JDBC_STORE:
            {
                // <string-keyed-jdbc-store/> was removed in schema 5.0.
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_5_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseStringKeyedJDBCStore(reader, cacheAddress, operations);
                break;
            }
        case BINARY_KEYED_JDBC_STORE:
            {
                // <binary-keyed-jdbc-store/> was removed in schema 14.0.
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_14_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseBinaryKeyedJDBCStore(reader, cacheAddress, operations);
                break;
            }
        case MIXED_KEYED_JDBC_STORE:
            {
                // <mixed-keyed-jdbc-store/> was removed in schema 14.0.
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_14_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseMixedKeyedJDBCStore(reader, cacheAddress, operations);
                break;
            }
        case INDEXING:
            {
                // <indexing/> was removed in schema 4.0.
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_4_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseIndexing(reader, cacheAddress, operations);
                break;
            }
        case OBJECT_MEMORY:
            {
                // <object-memory/> is valid in schemas [5.0, 11.0); it was renamed to
                // <heap-memory/> in 11.0. Out-of-range versions previously fell through
                // the remaining cases to the default throw; throw directly instead.
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_11_0) || !this.schema.since(InfinispanSubsystemSchema.VERSION_5_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseHeapMemory(reader, cacheAddress, operations);
                break;
            }
        case BINARY_MEMORY:
            {
                // <binary-memory/> is valid in schemas [5.0, 11.0).
                if (this.schema.since(InfinispanSubsystemSchema.VERSION_11_0) || !this.schema.since(InfinispanSubsystemSchema.VERSION_5_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseBinaryMemory(reader, cacheAddress, operations);
                break;
            }
        case OFF_HEAP_MEMORY:
            {
                // <off-heap-memory/> exists only since schema 5.0.
                if (!this.schema.since(InfinispanSubsystemSchema.VERSION_5_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseOffHeapMemory(reader, cacheAddress, operations);
                break;
            }
        case HEAP_MEMORY:
            {
                // <heap-memory/> exists only since schema 11.0.
                if (!this.schema.since(InfinispanSubsystemSchema.VERSION_11_0)) {
                    throw ParseUtils.unexpectedElement(reader);
                }
                this.parseHeapMemory(reader, cacheAddress, operations);
                break;
            }
        default:
            {
                throw ParseUtils.unexpectedElement(reader);
            }
    }
}
293202.241895wildfly
/**
 * Installs the runtime services for a distributed work manager: the work manager service
 * itself, its thread-pool and transaction dependencies, and the local/distributed
 * statistics services and runtime resources.
 */
protected void performRuntime(final OperationContext context, final ModelNode operation, final Resource resource) throws OperationFailedException {
    ModelNode model = resource.getModel();
    // Resolve the configuration attributes driving the work manager setup.
    String name = JcaDistributedWorkManagerDefinition.DWmParameters.NAME.getAttribute().resolveModelAttribute(context, model).asString();
    boolean elytronEnabled = JcaWorkManagerDefinition.WmParameters.ELYTRON_ENABLED.getAttribute().resolveModelAttribute(context, resource.getModel()).asBoolean();
    String policy = JcaDistributedWorkManagerDefinition.DWmParameters.POLICY.getAttribute().resolveModelAttribute(context, model).asString();
    String selector = JcaDistributedWorkManagerDefinition.DWmParameters.SELECTOR.getAttribute().resolveModelAttribute(context, model).asString();
    ServiceTarget serviceTarget = context.getServiceTarget();
    NamedDistributedWorkManager namedDistributedWorkManager = new NamedDistributedWorkManager(name, elytronEnabled);
    // Configure the distribution policy; WaterMark is used when no policy is configured.
    if (policy != null && !policy.trim().isEmpty()) {
        switch(JcaDistributedWorkManagerDefinition.PolicyValue.valueOf(policy)) {
            case NEVER:
                {
                    namedDistributedWorkManager.setPolicy(new Never());
                    break;
                }
            case ALWAYS:
                {
                    namedDistributedWorkManager.setPolicy(new Always());
                    break;
                }
            case WATERMARK:
                {
                    namedDistributedWorkManager.setPolicy(new WaterMark());
                    break;
                }
            default:
                throw ROOT_LOGGER.unsupportedPolicy(policy);
        }
        // Apply policy options reflectively; an option that cannot be injected is
        // logged and skipped (best-effort), not treated as a failure.
        Injection injector = new Injection();
        for (Map.Entry<String, String> entry : ((PropertiesAttributeDefinition) JcaDistributedWorkManagerDefinition.DWmParameters.POLICY_OPTIONS.getAttribute()).unwrap(context, model).entrySet()) {
            try {
                injector.inject(namedDistributedWorkManager.getPolicy(), entry.getKey(), entry.getValue());
            } catch (Exception e) {
                ROOT_LOGGER.unsupportedPolicyOption(entry.getKey());
            }
        }
    } else {
        namedDistributedWorkManager.setPolicy(new WaterMark());
    }
    // Configure the node selector; PingTime is used when no selector is configured.
    if (selector != null && !selector.trim().isEmpty()) {
        switch(JcaDistributedWorkManagerDefinition.SelectorValue.valueOf(selector)) {
            case FIRST_AVAILABLE:
                {
                    namedDistributedWorkManager.setSelector(new FirstAvailable());
                    break;
                }
            case MAX_FREE_THREADS:
                {
                    namedDistributedWorkManager.setSelector(new MaxFreeThreads());
                    break;
                }
            case PING_TIME:
                {
                    namedDistributedWorkManager.setSelector(new PingTime());
                    break;
                }
            default:
                throw ROOT_LOGGER.unsupportedSelector(selector);
        }
        // Apply selector options reflectively; failures are logged and skipped.
        Injection injector = new Injection();
        for (Map.Entry<String, String> entry : ((PropertiesAttributeDefinition) JcaDistributedWorkManagerDefinition.DWmParameters.SELECTOR_OPTIONS.getAttribute()).unwrap(context, model).entrySet()) {
            try {
                injector.inject(namedDistributedWorkManager.getSelector(), entry.getKey(), entry.getValue());
            } catch (Exception e) {
                ROOT_LOGGER.unsupportedSelectorOption(entry.getKey());
            }
        }
    } else {
        namedDistributedWorkManager.setSelector(new PingTime());
    }
    // Install the work manager service (ON_DEMAND) with its command dispatcher,
    // executor and XA terminator dependencies.
    DistributedWorkManagerService wmService = new DistributedWorkManagerService(namedDistributedWorkManager);
    ServiceBuilder<NamedDistributedWorkManager> builder = serviceTarget.addService(ConnectorServices.WORKMANAGER_SERVICE.append(name), wmService);
    builder.addDependency(ClusteringDefaultRequirement.COMMAND_DISPATCHER_FACTORY.getServiceName(context), CommandDispatcherFactory.class, wmService.getCommandDispatcherFactoryInjector());
    // The long-running executor dependency is only wired when the corresponding
    // child element is defined on this resource.
    if (resource.hasChild(PathElement.pathElement(Element.LONG_RUNNING_THREADS.getLocalName()))) {
        builder.addDependency(ThreadsServices.EXECUTOR.append(WORKMANAGER_LONG_RUNNING).append(name), Executor.class, wmService.getExecutorLongInjector());
    }
    builder.addDependency(ThreadsServices.EXECUTOR.append(WORKMANAGER_SHORT_RUNNING).append(name), Executor.class, wmService.getExecutorShortInjector());
    builder.addDependency(TxnServices.JBOSS_TXN_CONTEXT_XA_TERMINATOR, JBossContextXATerminator.class, wmService.getXaTerminatorInjector()).setInitialMode(ServiceController.Mode.ON_DEMAND).install();
    // Install the local and distributed statistics services in PASSIVE mode so they
    // start only once the work manager service is up.
    WorkManagerStatisticsService wmStatsService = new WorkManagerStatisticsService(context.getResourceRegistrationForUpdate(), name, true);
    serviceTarget.addService(ConnectorServices.WORKMANAGER_STATS_SERVICE.append(name), wmStatsService).addDependency(ConnectorServices.WORKMANAGER_SERVICE.append(name), WorkManager.class, wmStatsService.getWorkManagerInjector()).setInitialMode(ServiceController.Mode.PASSIVE).install();
    DistributedWorkManagerStatisticsService dwmStatsService = new DistributedWorkManagerStatisticsService(context.getResourceRegistrationForUpdate(), name, true);
    serviceTarget.addService(ConnectorServices.DISTRIBUTED_WORKMANAGER_STATS_SERVICE.append(name), dwmStatsService).addDependency(ConnectorServices.WORKMANAGER_SERVICE.append(name), DistributedWorkManager.class, dwmStatsService.getDistributedWorkManagerInjector()).setInitialMode(ServiceController.Mode.PASSIVE).install();
    // Register runtime-only statistics resources ("local" and "distributed") on this
    // resource, unless already present.
    PathElement peDistributedWm = PathElement.pathElement(org.jboss.as.connector.subsystems.resourceadapters.Constants.STATISTICS_NAME, "distributed");
    PathElement peLocaldWm = PathElement.pathElement(org.jboss.as.connector.subsystems.resourceadapters.Constants.STATISTICS_NAME, "local");
    final Resource wmResource = new IronJacamarResource.IronJacamarRuntimeResource();
    if (!resource.hasChild(peLocaldWm))
        resource.registerChild(peLocaldWm, wmResource);
    final Resource dwmResource = new IronJacamarResource.IronJacamarRuntimeResource();
    if (!resource.hasChild(peDistributedWm))
        resource.registerChild(peDistributedWm, dwmResource);
}
293536.21796wildfly
/**
 * Installs the runtime services for a resource-adapter connection definition:
 * the {@code ConnectionDefinitionService} itself, its statistics service, and
 * the runtime statistics child resources ("pool" and "extended").
 *
 * @param context   the operation context of the {@code :add} operation
 * @param operation the add operation; must carry {@code OP_ADDR} and resolve a jndi-name
 * @param resource  the connection-definition resource being added
 * @throws OperationFailedException if neither archive nor module is defined on the
 *         parent resource adapter, if a legacy security attribute is used, or if
 *         service creation fails
 */
protected void performRuntime(OperationContext context, ModelNode operation, final Resource resource) throws OperationFailedException {
    final ModelNode address = operation.require(OP_ADDR);
    PathAddress path = context.getCurrentAddress();
    final String jndiName = JNDI_NAME.resolveModelAttribute(context, operation).asString();
    // The parent path element is the resource adapter this definition belongs to.
    final String raName = path.getParent().getLastElement().getValue();
    final String archiveOrModuleName;
    ModelNode raModel = context.readResourceFromRoot(path.getParent(), false).getModel();
    final boolean statsEnabled = STATISTICS_ENABLED.resolveModelAttribute(context, raModel).asBoolean();
    if (!raModel.hasDefined(ARCHIVE.getName()) && !raModel.hasDefined(MODULE.getName())) {
        throw ConnectorLogger.ROOT_LOGGER.archiveOrModuleRequired();
    }
    ModelNode resourceModel = resource.getModel();
    // Legacy security attributes are not supported; fail fast before installing anything.
    // (These checks previously appeared twice in a row; the unreachable duplicate set
    // has been removed — behavior is unchanged.)
    if (resourceModel.hasDefined(SECURITY_DOMAIN.getName())) {
        throw SUBSYSTEM_RA_LOGGER.legacySecurityAttributeNotSupported(SECURITY_DOMAIN.getName());
    } else if (resourceModel.hasDefined(SECURITY_DOMAIN_AND_APPLICATION.getName())) {
        throw SUBSYSTEM_RA_LOGGER.legacySecurityAttributeNotSupported(SECURITY_DOMAIN_AND_APPLICATION.getName());
    }
    if (resourceModel.hasDefined(RECOVERY_SECURITY_DOMAIN.getName())) {
        throw SUBSYSTEM_RA_LOGGER.legacySecurityAttributeNotSupported(RECOVERY_SECURITY_DOMAIN.getName());
    }
    final ModelNode credentialReference = RECOVERY_CREDENTIAL_REFERENCE.resolveModelAttribute(context, resourceModel);
    if (raModel.get(ARCHIVE.getName()).isDefined()) {
        archiveOrModuleName = ARCHIVE.resolveModelAttribute(context, raModel).asString();
    } else {
        archiveOrModuleName = MODULE.resolveModelAttribute(context, raModel).asString();
    }
    final String poolName = PathAddress.pathAddress(address).getLastElement().getValue();
    try {
        ServiceName serviceName = ServiceName.of(ConnectorServices.RA_SERVICE, raName, poolName);
        ServiceName raServiceName = ServiceName.of(ConnectorServices.RA_SERVICE, raName);
        final ModifiableResourceAdapter ravalue = ((ModifiableResourceAdapter) context.getServiceRegistry(false).getService(raServiceName).getValue());
        boolean isXa = ravalue.getTransactionSupport() == TransactionSupportEnum.XATransaction;
        final ServiceTarget serviceTarget = context.getServiceTarget();
        final ConnectionDefinitionService service = new ConnectionDefinitionService();
        // The connection definition object is built lazily via this supplier so the
        // (optional) credential source injected below is available when it runs.
        service.getConnectionDefinitionSupplierInjector().inject(() -> RaOperationUtil.buildConnectionDefinitionObject(context, resourceModel, poolName, isXa, service.getCredentialSourceSupplier().getOptionalValue()));
        final ServiceBuilder<ModifiableConnDef> cdServiceBuilder = serviceTarget.addService(serviceName, service).setInitialMode(ServiceController.Mode.ACTIVE).addDependency(raServiceName, ModifiableResourceAdapter.class, service.getRaInjector());
        // Wire in Elytron authentication contexts (plain, application-scoped, recovery)
        // as service dependencies when configured on the resource.
        if (resourceModel.hasDefined(AUTHENTICATION_CONTEXT.getName())) {
            cdServiceBuilder.requires(context.getCapabilityServiceName(Capabilities.AUTHENTICATION_CONTEXT_CAPABILITY, AUTHENTICATION_CONTEXT.resolveModelAttribute(context, resourceModel).asString(), AuthenticationContext.class));
        } else if (resourceModel.hasDefined(AUTHENTICATION_CONTEXT_AND_APPLICATION.getName())) {
            cdServiceBuilder.requires(context.getCapabilityServiceName(Capabilities.AUTHENTICATION_CONTEXT_CAPABILITY, AUTHENTICATION_CONTEXT_AND_APPLICATION.resolveModelAttribute(context, resourceModel).asString(), AuthenticationContext.class));
        }
        if (resourceModel.hasDefined(RECOVERY_AUTHENTICATION_CONTEXT.getName())) {
            cdServiceBuilder.requires(context.getCapabilityServiceName(Capabilities.AUTHENTICATION_CONTEXT_CAPABILITY, RECOVERY_AUTHENTICATION_CONTEXT.resolveModelAttribute(context, resourceModel).asString(), AuthenticationContext.class));
        }
        if (credentialReference.isDefined()) {
            service.getCredentialSourceSupplier().inject(CredentialReference.getCredentialSourceSupplier(context, RECOVERY_CREDENTIAL_REFERENCE, resourceModel, cdServiceBuilder));
        }
        cdServiceBuilder.install();
        ServiceRegistry registry = context.getServiceRegistry(true);
        final ServiceController<?> raxmlController = registry.getService(ServiceName.of(ConnectorServices.RA_SERVICE, raName));
        Activation raxml = (Activation) raxmlController.getValue();
        ServiceName deploymentServiceName = ConnectorServices.getDeploymentServiceName(archiveOrModuleName, raName);
        // Fall back to the default bootstrap context unless one is explicitly configured.
        String bootStrapCtxName = DEFAULT_NAME;
        if (raxml.getBootstrapContext() != null && !raxml.getBootstrapContext().equals("undefined")) {
            bootStrapCtxName = raxml.getBootstrapContext();
        }
        final boolean useJavaContext = USE_JAVA_CONTEXT.resolveModelAttribute(context, raModel).asBoolean();
        ConnectionDefinitionStatisticsService connectionDefinitionStatisticsService = new ConnectionDefinitionStatisticsService(context.getResourceRegistrationForUpdate(), jndiName, useJavaContext, poolName, statsEnabled);
        ServiceBuilder statsServiceBuilder = serviceTarget.addService(serviceName.append(ConnectorServices.STATISTICS_SUFFIX), connectionDefinitionStatisticsService);
        // PASSIVE mode: statistics only start once the bootstrap context and the
        // resource-adapter deployment are available.
        statsServiceBuilder.addDependency(ConnectorServices.BOOTSTRAP_CONTEXT_SERVICE.append(bootStrapCtxName), Object.class, connectionDefinitionStatisticsService.getBootstrapContextInjector()).addDependency(deploymentServiceName, Object.class, connectionDefinitionStatisticsService.getResourceAdapterDeploymentInjector()).setInitialMode(ServiceController.Mode.PASSIVE).install();
        // Register the runtime-only statistics child resources.
        PathElement peCD = PathElement.pathElement(Constants.STATISTICS_NAME, "pool");
        final Resource cdResource = new IronJacamarResource.IronJacamarRuntimeResource();
        resource.registerChild(peCD, cdResource);
        PathElement peExtended = PathElement.pathElement(Constants.STATISTICS_NAME, "extended");
        final Resource extendedResource = new IronJacamarResource.IronJacamarRuntimeResource();
        resource.registerChild(peExtended, extendedResource);
    } catch (Exception e) {
        throw new OperationFailedException(e, new ModelNode().set(ConnectorLogger.ROOT_LOGGER.failedToCreate("ConnectionDefinition", operation, e.getLocalizedMessage())));
    }
}
292298.772797wildfly
/**
 * Translates an asadmin-style "create-jms-resource" command line into a
 * messaging-activemq {@code add} operation for a JMS queue, topic, or
 * connection factory.
 *
 * @param ctx the CLI command context carrying the parsed arguments
 * @return the built {@code add} operation request
 * @throws OperationFormatException if arguments are missing, a property cannot
 *         be parsed, or the resource type is unsupported
 */
public ModelNode buildRequestWithoutHeaders(CommandContext ctx) throws OperationFormatException {
    try {
        if (!ctx.getParsedCommandLine().hasProperties()) {
            throw MessagingLogger.ROOT_LOGGER.missingArguments();
        }
    } catch (CommandFormatException e) {
        throw new OperationFormatException(e.getLocalizedMessage());
    }
    String restype = null;
    String propsStr = null;
    String jndiName = null;
    String[] args = ctx.getArgumentsString().split("\\s+");
    int i = 0;
    while (i < args.length) {
        String arg = args[i++];
        if (arg.equals("--restype")) {
            if (i < args.length) {
                restype = args[i++];
            }
        } else if (arg.equals("--target")) {
            // Accepted for asadmin compatibility but ignored.
            // NOTE(review): the option's value (if any) is not consumed here and will
            // be picked up as the JNDI name on the next iteration — confirm this
            // matches the intended CLI grammar.
        } else if (arg.equals("--description")) {
            // Accepted for compatibility but ignored (see note on --target).
        } else if (arg.equals("--property")) {
            if (i < args.length) {
                propsStr = args[i++];
            }
        } else if (arg.equals("--enabled")) {
            // Accepted for compatibility but ignored (see note on --target).
        } else {
            // Any bare argument is treated as the JNDI name (last one wins).
            jndiName = arg;
        }
    }
    if (restype == null) {
        throw MessagingLogger.ROOT_LOGGER.missingRestype();
    }
    if (jndiName == null) {
        throw MessagingLogger.ROOT_LOGGER.missingJNDIName();
    }
    String name = null;
    String serverName = "default";
    final Map<String, String> props;
    if (propsStr != null) {
        // --property takes a colon-separated list of name=value pairs.
        props = new HashMap<String, String>();
        String[] propsArr = propsStr.split(":");
        for (String prop : propsArr) {
            int equalsIndex = prop.indexOf('=');
            // Reject pairs with no '=', or with an empty name or value.
            if (equalsIndex < 0 || equalsIndex == prop.length() - 1) {
                throw MessagingLogger.ROOT_LOGGER.failedToParseProperty(prop);
            }
            String propName = prop.substring(0, equalsIndex).trim();
            String propValue = prop.substring(equalsIndex + 1).trim();
            if (propName.isEmpty()) {
                throw MessagingLogger.ROOT_LOGGER.failedToParseProperty(prop);
            }
            if (propName.equals("imqDestinationName") || propName.equalsIgnoreCase("name")) {
                // Overrides the resource name derived from the JNDI name below.
                name = propValue;
            } else if ("ClientId".equals(propName)) {
                props.put("client-id", propValue);
            }
        }
    } else {
        props = Collections.emptyMap();
    }
    if (name == null) {
        // Derive a legal management resource name from the JNDI name.
        name = jndiName.replace('/', '_');
    }
    if (restype.equals("jakarta.jms.Queue")) {
        return buildAddRequest("jms-queue", serverName, name, jndiName, props);
    } else if (restype.equals("jakarta.jms.Topic")) {
        return buildAddRequest("jms-topic", serverName, name, jndiName, props);
    } else if (restype.equals("jakarta.jms.ConnectionFactory") || restype.equals("jakarta.jms.TopicConnectionFactory") || restype.equals("jakarta.jms.QueueConnectionFactory")) {
        return buildAddRequest("connection-factory", serverName, name, jndiName, props);
    } else {
        throw MessagingLogger.ROOT_LOGGER.unsupportedResourceType(restype);
    }
}

/**
 * Builds an {@code add} operation at
 * /subsystem=messaging-activemq/server=&lt;serverName&gt;/&lt;resourceType&gt;=&lt;name&gt;
 * with the given JNDI entry and extra properties.
 */
private ModelNode buildAddRequest(String resourceType, String serverName, String name, String jndiName, Map<String, String> props) throws OperationFormatException {
    DefaultOperationRequestBuilder builder = new DefaultOperationRequestBuilder();
    builder.addNode("subsystem", "messaging-activemq");
    builder.addNode("server", serverName);
    builder.addNode(resourceType, name);
    builder.setOperationName("add");
    builder.getModelNode().get("entries").add(jndiName);
    addProperties(props, builder);
    return builder.buildRequest();
}
292861.732199wildfly
/**
 * Runtime handler for the read-only Artemis server management operations
 * (connection/consumer/session listings and related lookups). Dispatches on
 * the operation name and sets the result on the context.
 *
 * @param context   the operation context
 * @param operation the operation; must carry {@code OP} and, for some
 *                  operations, a connection or session id
 * @throws OperationFailedException if the broker resource cannot be found or
 *         the operation name is unsupported
 */
protected void executeRuntimeStep(OperationContext context, ModelNode operation) throws OperationFailedException {
    if (rollbackOperationIfServerNotActive(context, operation)) {
        return;
    }
    final String operationName = operation.require(OP).asString();
    final ActiveMQBroker server = getServer(context, operation);
    if (server == null) {
        PathAddress address = PathAddress.pathAddress(operation.require(OP_ADDR));
        throw ControllerLogger.ROOT_LOGGER.managementResourceNotFound(address);
    }
    final ActiveMQServerControl serverControl = server.getActiveMQServerControl();
    try {
        if (LIST_CONNECTIONS_AS_JSON.equals(operationName)) {
            // Enrich each raw connection entry before reporting. (The raw JSON was
            // previously set on the result and immediately overwritten; that
            // redundant set has been removed.)
            String json = serverControl.listConnectionsAsJSON();
            final JsonArrayBuilder enrichedConnections = Json.createArrayBuilder();
            try (JsonReader reader = Json.createReader(new StringReader(json))) {
                final JsonArray connections = reader.readArray();
                for (int i = 0; i < connections.size(); i++) {
                    enrichedConnections.add(enrichConnection(connections.getJsonObject(i), serverControl));
                }
            }
            context.getResult().set(enrichedConnections.build().toString());
        } else if (LIST_CONSUMERS_AS_JSON.equals(operationName)) {
            String connectionID = CONNECTION_ID.resolveModelAttribute(context, operation).asString();
            context.getResult().set(enrichConsumersJson(serverControl.listConsumersAsJSON(connectionID), server));
        } else if (LIST_ALL_CONSUMERS_AS_JSON.equals(operationName)) {
            context.getResult().set(enrichConsumersJson(serverControl.listAllConsumersAsJSON(), server));
        } else if (LIST_TARGET_DESTINATIONS.equals(operationName)) {
            String sessionID = SESSION_ID.resolveModelAttribute(context, operation).asString();
            String[] list = listTargetDestinations(server, sessionID);
            reportListOfStrings(context, list);
        } else if (GET_LAST_SENT_MESSAGE_ID.equals(operationName)) {
            String sessionID = SESSION_ID.resolveModelAttribute(context, operation).asString();
            String addressName = ADDRESS_NAME.resolveModelAttribute(context, operation).asString();
            ServerSession session = ((ActiveMQServer) server.getDelegate()).getSessionByID(sessionID);
            // If the session is unknown, the result is intentionally left undefined.
            if (session != null) {
                // First producer matching the address wins.
                for (ServerProducer producer : session.getServerProducers()) {
                    if (addressName.equals(producer.getAddress())) {
                        context.getResult().set(producer.getLastProducedMessageID().toString());
                        break;
                    }
                }
            }
        } else if (GET_SESSION_CREATION_TIME.equals(operationName)) {
            String sessionID = SESSION_ID.resolveModelAttribute(context, operation).asString();
            ServerSession session = ((ActiveMQServer) server.getDelegate()).getSessionByID(sessionID);
            if (session != null) {
                String time = String.valueOf(session.getCreationTime());
                context.getResult().set(time);
            }
        } else if (LIST_SESSIONS_AS_JSON.equals(operationName)) {
            String connectionID = CONNECTION_ID.resolveModelAttribute(context, operation).asString();
            String json = serverControl.listSessionsAsJSON(connectionID);
            context.getResult().set(json);
        } else if (LIST_PREPARED_TRANSACTION_JMS_DETAILS_AS_JSON.equals(operationName)) {
            String json = serverControl.listPreparedTransactionDetailsAsJSON();
            context.getResult().set(json);
        } else if (LIST_PREPARED_TRANSACTION_JMS_DETAILS_AS_HTML.equals(operationName)) {
            String html = serverControl.listPreparedTransactionDetailsAsHTML();
            context.getResult().set(html);
        } else {
            throw MessagingLogger.ROOT_LOGGER.unsupportedOperation(operationName);
        }
    } catch (RuntimeException e) {
        // Programming errors propagate; checked failures become a failure description.
        throw e;
    } catch (Exception e) {
        context.getFailureDescription().set(e.getLocalizedMessage());
    }
}

/**
 * Parses a JSON array of consumer entries, enriches each one via
 * {@code enrichConsumer}, and returns the enriched array as a JSON string.
 */
private String enrichConsumersJson(String json, ActiveMQBroker server) {
    final JsonArrayBuilder enrichedConsumers = Json.createArrayBuilder();
    try (JsonReader reader = Json.createReader(new StringReader(json))) {
        final JsonArray consumers = reader.readArray();
        for (int i = 0; i < consumers.size(); i++) {
            enrichedConsumers.add(enrichConsumer(consumers.getJsonObject(i), server));
        }
    }
    return enrichedConsumers.build().toString();
}
292016.0723110wildfly
/**
 * Parses a legacy security &lt;jsse/&gt; element into an add operation.
 *
 * Keystore/truststore sub-attributes are collected under the KEYSTORE,
 * TRUSTSTORE, KEY_MANAGER and TRUST_MANAGER complex attributes of the op.
 * Bookkeeping: any keystore (resp. truststore) attribute other than the
 * password adds the corresponding *_PASSWORD to {@code required}; only the
 * password attributes themselves are added to {@code visited}. After the
 * attribute loop, every required password must have been visited, otherwise
 * parsing fails.
 *
 * @param list          accumulator for parsed add operations
 * @param parentAddress address the JSSE=classic resource is added under
 * @param reader        positioned at the jsse element's start tag
 * @throws XMLStreamException on unexpected attributes or a missing required
 *         keystore/truststore password
 */
private void parseJSSE(List<ModelNode> list, PathAddress parentAddress, XMLExtendedStreamReader reader) throws XMLStreamException {
    ModelNode op = appendAddOperation(list, parentAddress, JSSE, CLASSIC);
    // visited: password attributes actually seen; required: passwords implied
    // by other keystore/truststore attributes.
    EnumSet<Attribute> visited = EnumSet.noneOf(Attribute.class);
    EnumSet<Attribute> required = EnumSet.noneOf(Attribute.class);
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        requireNoNamespaceAttribute(reader, i);
        final String value = reader.getAttributeValue(i);
        final Attribute attribute = Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            // --- keystore attributes -------------------------------------
            case KEYSTORE_PASSWORD:
                {
                    ComplexAttributes.PASSWORD.parseAndSetParameter(value, op.get(KEYSTORE), reader);
                    visited.add(attribute);
                    break;
                }
            case KEYSTORE_TYPE:
                {
                    ComplexAttributes.TYPE.parseAndSetParameter(value, op.get(KEYSTORE), reader);
                    // Any non-password keystore attribute makes the password mandatory.
                    required.add(Attribute.KEYSTORE_PASSWORD);
                    break;
                }
            case KEYSTORE_URL:
                {
                    ComplexAttributes.URL.parseAndSetParameter(value, op.get(KEYSTORE), reader);
                    required.add(Attribute.KEYSTORE_PASSWORD);
                    break;
                }
            case KEYSTORE_PROVIDER:
                {
                    ComplexAttributes.PROVIDER.parseAndSetParameter(value, op.get(KEYSTORE), reader);
                    required.add(Attribute.KEYSTORE_PASSWORD);
                    break;
                }
            case KEYSTORE_PROVIDER_ARGUMENT:
                {
                    ComplexAttributes.PROVIDER_ARGUMENT.parseAndSetParameter(value, op.get(KEYSTORE), reader);
                    required.add(Attribute.KEYSTORE_PASSWORD);
                    break;
                }
            // --- key manager factory (no password requirement implied) ---
            case KEY_MANAGER_FACTORY_PROVIDER:
                {
                    ComplexAttributes.PROVIDER.parseAndSetParameter(value, op.get(KEY_MANAGER), reader);
                    break;
                }
            case KEY_MANAGER_FACTORY_ALGORITHM:
                {
                    ComplexAttributes.ALGORITHM.parseAndSetParameter(value, op.get(KEY_MANAGER), reader);
                    break;
                }
            // --- truststore attributes (mirror the keystore handling) ----
            case TRUSTSTORE_PASSWORD:
                {
                    ComplexAttributes.PASSWORD.parseAndSetParameter(value, op.get(TRUSTSTORE), reader);
                    visited.add(attribute);
                    break;
                }
            case TRUSTSTORE_TYPE:
                {
                    ComplexAttributes.TYPE.parseAndSetParameter(value, op.get(TRUSTSTORE), reader);
                    required.add(Attribute.TRUSTSTORE_PASSWORD);
                    break;
                }
            case TRUSTSTORE_URL:
                {
                    ComplexAttributes.URL.parseAndSetParameter(value, op.get(TRUSTSTORE), reader);
                    required.add(Attribute.TRUSTSTORE_PASSWORD);
                    break;
                }
            case TRUSTSTORE_PROVIDER:
                {
                    ComplexAttributes.PROVIDER.parseAndSetParameter(value, op.get(TRUSTSTORE), reader);
                    required.add(Attribute.TRUSTSTORE_PASSWORD);
                    break;
                }
            case TRUSTSTORE_PROVIDER_ARGUMENT:
                {
                    ComplexAttributes.PROVIDER_ARGUMENT.parseAndSetParameter(value, op.get(TRUSTSTORE), reader);
                    required.add(Attribute.TRUSTSTORE_PASSWORD);
                    break;
                }
            // --- trust manager factory -----------------------------------
            case TRUST_MANAGER_FACTORY_PROVIDER:
                {
                    ComplexAttributes.PROVIDER.parseAndSetParameter(value, op.get(TRUST_MANAGER), reader);
                    break;
                }
            case TRUST_MANAGER_FACTORY_ALGORITHM:
                {
                    ComplexAttributes.ALGORITHM.parseAndSetParameter(value, op.get(TRUST_MANAGER), reader);
                    break;
                }
            // --- simple top-level attributes -----------------------------
            case CLIENT_ALIAS:
                {
                    JSSEResourceDefinition.CLIENT_ALIAS.parseAndSetParameter(value, op, reader);
                    break;
                }
            case SERVER_ALIAS:
                {
                    JSSEResourceDefinition.SERVER_ALIAS.parseAndSetParameter(value, op, reader);
                    break;
                }
            case CLIENT_AUTH:
                {
                    JSSEResourceDefinition.CLIENT_AUTH.parseAndSetParameter(value, op, reader);
                    break;
                }
            case SERVICE_AUTH_TOKEN:
                {
                    JSSEResourceDefinition.SERVICE_AUTH_TOKEN.parseAndSetParameter(value, op, reader);
                    break;
                }
            case CIPHER_SUITES:
                {
                    JSSEResourceDefinition.CIPHER_SUITES.parseAndSetParameter(value, op, reader);
                    break;
                }
            case PROTOCOLS:
                {
                    JSSEResourceDefinition.PROTOCOLS.parseAndSetParameter(value, op, reader);
                    break;
                }
            default:
                throw unexpectedAttribute(reader, i);
        }
    }
    // A keystore/truststore was configured without its password.
    if (!visited.containsAll(required)) {
        throw SecurityLogger.ROOT_LOGGER.xmlStreamExceptionMissingAttribute(Attribute.KEYSTORE_PASSWORD.getLocalName(), Attribute.TRUSTSTORE_PASSWORD.getLocalName(), reader.getLocation());
    }
    // Nested <property> elements become additional-properties on the op.
    parseProperties(Element.PROPERTY.getLocalName(), reader, op, JSSEResourceDefinition.ADDITIONAL_PROPERTIES);
}
291847.3210132wildfly
/**
 * Verifies that every invalid {@code security enable-ssl-http-server}
 * invocation fails AND leaves the management model untouched: missing
 * arguments, unknown server, bad paths, missing password, unknown key-store
 * names, conflicting options, and unknown https-listener/socket-binding
 * names.
 */
public void testInvalidEnableSSL() throws Exception {
    assertEmptyModel(null);
    // No arguments at all.
    expectEnableSslFailure("security enable-ssl-http-server");
    // Unknown --server-name.
    expectEnableSslFailure("security enable-ssl-http-server --override-ssl-context --key-store-path=" + SERVER_KEY_STORE_FILE + " --key-store-password=" + KEY_STORE_PASSWORD + " --key-store-path-relative-to=" + Util.JBOSS_SERVER_CONFIG_DIR + " --no-reload" + " --server-name=foo");
    // Non-existent key-store file.
    expectEnableSslFailure("security enable-ssl-http-server --override-ssl-context --key-store-path=" + "foo.bar" + " --key-store-password=" + KEY_STORE_PASSWORD + " --no-reload");
    // Missing key-store password.
    expectEnableSslFailure("security enable-ssl-http-server --override-ssl-context --key-store-path=" + SERVER_KEY_STORE_FILE + " --key-store-path-relative-to=" + Util.JBOSS_SERVER_CONFIG_DIR + " --no-reload");
    // Unknown key-store name.
    expectEnableSslFailure("security enable-ssl-http-server --override-ssl-context --key-store-name=" + "foo.bar" + " --no-reload");
    // --key-store-name conflicting with --key-store-path options.
    expectEnableSslFailureWithDummyKeyStore("security enable-ssl-http-server --override-ssl-context --key-store-name=foo" + " --key-store-path=" + SERVER_KEY_STORE_FILE + " --key-store-password=" + KEY_STORE_PASSWORD + " --key-store-path-relative-to=" + Util.JBOSS_SERVER_CONFIG_DIR + " --no-reload");
    // --trust-store-name conflicting with trusted-certificate options.
    expectEnableSslFailureWithDummyKeyStore("security enable-ssl-http-server --override-ssl-context --key-store-path=" + SERVER_KEY_STORE_FILE + " --key-store-password=" + KEY_STORE_PASSWORD + " --key-store-path-relative-to=" + Util.JBOSS_SERVER_CONFIG_DIR + " --trusted-certificate-path=" + clientCertificate.getAbsolutePath() + " --trust-store-name=foo" + " --trust-store-file-name=" + GENERATED_TRUST_STORE_FILE_NAME + " --trust-store-file-password=" + GENERATED_KEY_STORE_PASSWORD + " --new-trust-store-name=" + TRUST_STORE_NAME + " --new-trust-manager-name=" + TRUST_MANAGER_NAME + " --new-key-store-name=" + KEY_STORE_NAME + " --new-key-manager-name=" + KEY_MANAGER_NAME + " --new-ssl-context-name=" + SSL_CONTEXT_NAME + " --no-reload");
    // Unknown https-listener name.
    expectEnableSslFailure("security enable-ssl-http-server --key-store-path=" + SERVER_KEY_STORE_FILE + " --key-store-password=" + KEY_STORE_PASSWORD + " --key-store-path-relative-to=" + Util.JBOSS_SERVER_CONFIG_DIR + " --no-reload" + " --https-listener-name=" + "UNKNOWN");
    // Unknown socket-binding for a newly-added https-listener.
    expectEnableSslFailure("security enable-ssl-http-server --key-store-path=" + SERVER_KEY_STORE_FILE + " --key-store-password=" + KEY_STORE_PASSWORD + " --key-store-path-relative-to=" + Util.JBOSS_SERVER_CONFIG_DIR + " --no-reload" + " --add-https-listener --https-listener-name=bar --https-listener-socket-binding-name=UNKNOWN");
}

/**
 * Runs the given CLI command, asserts that it fails, and asserts that the
 * management model is still empty afterwards.
 */
private void expectEnableSslFailure(String command) throws Exception {
    boolean failed = false;
    try {
        ctx.handle(command);
    } catch (Exception ex) {
        failed = true;
    }
    Assert.assertTrue(failed);
    assertEmptyModel(null);
}

/**
 * Same as {@link #expectEnableSslFailure(String)}, but first registers a dummy
 * elytron key-store named "foo" (removed again in a finally block) so commands
 * referencing an existing key-store name can be exercised.
 */
private void expectEnableSslFailureWithDummyKeyStore(String command) throws Exception {
    boolean failed = false;
    ctx.handle("/subsystem=elytron/key-store=foo:add(path=foo.bar, type=JKS" + ", relative-to=" + Util.JBOSS_SERVER_CONFIG_DIR + ", credential-reference={clear-text=" + KEY_STORE_PASSWORD + "})");
    try {
        try {
            ctx.handle(command);
        } catch (Exception ex) {
            failed = true;
        }
    } finally {
        ctx.handle("/subsystem=elytron/key-store=foo:remove()");
    }
    Assert.assertTrue(failed);
    assertEmptyModel(null);
}
292765.582992wildfly
/**
 * Marshals the transactions subsystem model to XML. Each optional child
 * element is only written when at least one of its attributes is marshallable
 * (i.e. defined and non-default); element order follows the subsystem schema.
 *
 * @param writer  the StAX writer positioned for the subsystem element
 * @param context carries the model node to marshal
 * @throws XMLStreamException on writer failure
 */
public void writeContent(XMLExtendedStreamWriter writer, SubsystemMarshallingContext context) throws XMLStreamException {
    context.startSubsystemElement(Namespace.CURRENT.getUriString(), false);
    ModelNode node = context.getModelNode();
    // <core-environment> is always written; its process-id child is delegated.
    writer.writeStartElement(Element.CORE_ENVIRONMENT.getLocalName());
    TransactionSubsystemRootResourceDefinition.NODE_IDENTIFIER.marshallAsAttribute(node, writer);
    writeProcessId(writer, node);
    writer.writeEndElement();
    // <recovery-environment>
    if (TransactionSubsystemRootResourceDefinition.BINDING.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.STATUS_BINDING.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.RECOVERY_LISTENER.isMarshallable(node)) {
        writer.writeStartElement(Element.RECOVERY_ENVIRONMENT.getLocalName());
        TransactionSubsystemRootResourceDefinition.BINDING.marshallAsAttribute(node, writer);
        TransactionSubsystemRootResourceDefinition.STATUS_BINDING.marshallAsAttribute(node, writer);
        TransactionSubsystemRootResourceDefinition.RECOVERY_LISTENER.marshallAsAttribute(node, writer);
        writer.writeEndElement();
    }
    // <coordinator-environment>
    if (TransactionSubsystemRootResourceDefinition.STATISTICS_ENABLED.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.ENABLE_TSM_STATUS.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.DEFAULT_TIMEOUT.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.MAXIMUM_TIMEOUT.isMarshallable(node)) {
        writer.writeStartElement(Element.COORDINATOR_ENVIRONMENT.getLocalName());
        TransactionSubsystemRootResourceDefinition.STATISTICS_ENABLED.marshallAsAttribute(node, writer);
        TransactionSubsystemRootResourceDefinition.ENABLE_TSM_STATUS.marshallAsAttribute(node, writer);
        TransactionSubsystemRootResourceDefinition.DEFAULT_TIMEOUT.marshallAsAttribute(node, writer);
        TransactionSubsystemRootResourceDefinition.MAXIMUM_TIMEOUT.marshallAsAttribute(node, writer);
        writer.writeEndElement();
    }
    // <object-store>
    if (TransactionSubsystemRootResourceDefinition.OBJECT_STORE_RELATIVE_TO.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.OBJECT_STORE_PATH.isMarshallable(node)) {
        writer.writeStartElement(Element.OBJECT_STORE.getLocalName());
        TransactionSubsystemRootResourceDefinition.OBJECT_STORE_PATH.marshallAsAttribute(node, writer);
        TransactionSubsystemRootResourceDefinition.OBJECT_STORE_RELATIVE_TO.marshallAsAttribute(node, writer);
        writer.writeEndElement();
    }
    // Boolean flags are rendered as empty marker elements only when true.
    if (node.hasDefined(CommonAttributes.JTS) && node.get(CommonAttributes.JTS).asBoolean()) {
        writer.writeStartElement(Element.JTS.getLocalName());
        writer.writeEndElement();
    }
    if (node.hasDefined(CommonAttributes.USE_JOURNAL_STORE) && node.get(CommonAttributes.USE_JOURNAL_STORE).asBoolean()) {
        writer.writeStartElement(Element.USE_JOURNAL_STORE.getLocalName());
        TransactionSubsystemRootResourceDefinition.JOURNAL_STORE_ENABLE_ASYNC_IO.marshallAsAttribute(node, writer);
        writer.writeEndElement();
    }
    // <jdbc-store> with optional per-table-kind empty child elements.
    // Note: attributes written after writeEmptyElement attach to that empty
    // element (standard StAX behavior).
    if (node.hasDefined(CommonAttributes.USE_JDBC_STORE) && node.get(CommonAttributes.USE_JDBC_STORE).asBoolean()) {
        writer.writeStartElement(Element.JDBC_STORE.getLocalName());
        TransactionSubsystemRootResourceDefinition.JDBC_STORE_DATASOURCE.marshallAsAttribute(node, writer);
        if (TransactionSubsystemRootResourceDefinition.JDBC_ACTION_STORE_TABLE_PREFIX.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.JDBC_ACTION_STORE_DROP_TABLE.isMarshallable(node)) {
            writer.writeEmptyElement(Element.JDBC_ACTION_STORE.getLocalName());
            TransactionSubsystemRootResourceDefinition.JDBC_ACTION_STORE_TABLE_PREFIX.marshallAsAttribute(node, writer);
            TransactionSubsystemRootResourceDefinition.JDBC_ACTION_STORE_DROP_TABLE.marshallAsAttribute(node, writer);
        }
        if (TransactionSubsystemRootResourceDefinition.JDBC_COMMUNICATION_STORE_TABLE_PREFIX.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.JDBC_COMMUNICATION_STORE_DROP_TABLE.isMarshallable(node)) {
            writer.writeEmptyElement(Element.JDBC_COMMUNICATION_STORE.getLocalName());
            TransactionSubsystemRootResourceDefinition.JDBC_COMMUNICATION_STORE_TABLE_PREFIX.marshallAsAttribute(node, writer);
            TransactionSubsystemRootResourceDefinition.JDBC_COMMUNICATION_STORE_DROP_TABLE.marshallAsAttribute(node, writer);
        }
        if (TransactionSubsystemRootResourceDefinition.JDBC_STATE_STORE_TABLE_PREFIX.isMarshallable(node) || TransactionSubsystemRootResourceDefinition.JDBC_STATE_STORE_DROP_TABLE.isMarshallable(node)) {
            writer.writeEmptyElement(Element.JDBC_STATE_STORE.getLocalName());
            TransactionSubsystemRootResourceDefinition.JDBC_STATE_STORE_TABLE_PREFIX.marshallAsAttribute(node, writer);
            TransactionSubsystemRootResourceDefinition.JDBC_STATE_STORE_DROP_TABLE.marshallAsAttribute(node, writer);
        }
        writer.writeEndElement();
    }
    // <cm-resources>: one <cm-resource> per registered commit-markable resource.
    if (node.hasDefined(CommonAttributes.CM_RESOURCE) && !node.get(CommonAttributes.CM_RESOURCE).asList().isEmpty()) {
        writer.writeStartElement(Element.CM_RESOURCES.getLocalName());
        for (Property cmr : node.get(CommonAttributes.CM_RESOURCE).asPropertyList()) {
            writer.writeStartElement(CommonAttributes.CM_RESOURCE);
            writer.writeAttribute(Attribute.JNDI_NAME.getLocalName(), cmr.getName());
            if (cmr.getValue().hasDefined(CMResourceResourceDefinition.CM_TABLE_NAME.getName()) || cmr.getValue().hasDefined(CMResourceResourceDefinition.CM_TABLE_BATCH_SIZE.getName()) || cmr.getValue().hasDefined(CMResourceResourceDefinition.CM_TABLE_IMMEDIATE_CLEANUP.getName())) {
                writer.writeStartElement(Element.CM_TABLE.getLocalName());
                CMResourceResourceDefinition.CM_TABLE_NAME.marshallAsAttribute(cmr.getValue(), writer);
                CMResourceResourceDefinition.CM_TABLE_BATCH_SIZE.marshallAsAttribute(cmr.getValue(), writer);
                CMResourceResourceDefinition.CM_TABLE_IMMEDIATE_CLEANUP.marshallAsAttribute(cmr.getValue(), writer);
                writer.writeEndElement();
            }
            writer.writeEndElement();
        }
        writer.writeEndElement();
    }
    // <client>
    if (TransactionSubsystemRootResourceDefinition.STALE_TRANSACTION_TIME.isMarshallable(node)) {
        writer.writeStartElement(Element.CLIENT.getLocalName());
        TransactionSubsystemRootResourceDefinition.STALE_TRANSACTION_TIME.marshallAsAttribute(node, writer);
        writer.writeEndElement();
    }
    // Close the subsystem element opened by startSubsystemElement.
    writer.writeEndElement();
}
302586.921696cassandra
/**
 * Prints a cluster-wide summary: cluster name, snitch (unwrapping the dynamic
 * snitch), partitioner, schema versions, node liveness counts, per-datacenter
 * node/down counts, release versions, and per-keyspace replication info.
 *
 * BUG FIX: ownership "Note:" messages were previously flushed with
 * {@code out.printf("%n" + errors)}, which treats the collected exception text
 * as a printf format string — any '%' in a message would throw an
 * IllegalFormatException. Notes are now accumulated with real line separators
 * and printed verbatim.
 *
 * @param probe JMX connection to the node being queried
 */
public void execute(NodeProbe probe) {
    PrintStream out = probe.output().out;
    out.println("Cluster Information:");
    out.println("\tName: " + probe.getClusterName());
    String snitch = probe.getEndpointSnitchInfoProxy().getSnitchName();
    boolean dynamicSnitchEnabled = false;
    // When the dynamic snitch is in use, report the wrapped (sub)snitch class instead.
    if (snitch.equals(DynamicEndpointSnitch.class.getName())) {
        snitch = probe.getDynamicEndpointSnitchInfoProxy().getSubsnitchClassName();
        dynamicSnitchEnabled = true;
    }
    out.println("\tSnitch: " + snitch);
    out.println("\tDynamicEndPointSnitch: " + (dynamicSnitchEnabled ? "enabled" : "disabled"));
    out.println("\tPartitioner: " + probe.getPartitioner());
    out.println("\tSchema versions:");
    Map<String, List<String>> schemaVersions = printPort ? probe.getSpProxy().getSchemaVersionsWithPort() : probe.getSpProxy().getSchemaVersions();
    for (Map.Entry<String, List<String>> entry : schemaVersions.entrySet()) {
        out.printf("\t\t%s: %s%n%n", entry.getKey(), entry.getValue());
    }
    boolean withPort = true;
    joiningNodes = probe.getJoiningNodes(withPort);
    leavingNodes = probe.getLeavingNodes(withPort);
    movingNodes = probe.getMovingNodes(withPort);
    liveNodes = probe.getLiveNodes(withPort);
    unreachableNodes = probe.getUnreachableNodes(withPort);
    List<String> keyspaces = probe.getKeyspaces();
    out.println("Stats for all nodes:");
    out.println("\tLive: " + liveNodes.size());
    out.println("\tJoining: " + joiningNodes.size());
    out.println("\tMoving: " + movingNodes.size());
    out.println("\tLeaving: " + leavingNodes.size());
    out.println("\tUnreachable: " + unreachableNodes.size());
    Map<String, String> tokensToEndpoints = probe.getTokenToEndpointMap(withPort);
    StringBuilder errors = new StringBuilder();
    Map<String, Float> ownerships = null;
    try {
        ownerships = probe.effectiveOwnershipWithPort(keyspace);
    } catch (IllegalStateException ex) {
        // Effective ownership could not be computed; fall back to raw token
        // ownership and remember the reason so it can be reported at the end.
        try {
            ownerships = probe.getOwnershipWithPort();
            errors.append("Note: ").append(ex.getMessage()).append(System.lineSeparator());
        } catch (Exception e) {
            out.printf("%nError: %s%n", e.getMessage());
            System.exit(1);
        }
    } catch (IllegalArgumentException ex) {
        // Bad keyspace argument — nothing sensible to fall back to.
        out.printf("%nError: %s%n", ex.getMessage());
        System.exit(1);
    }
    SortedMap<String, SetHostStatWithPort> dcs = NodeTool.getOwnershipByDcWithPort(probe, resolveIp, tokensToEndpoints, ownerships);
    out.println("\nData Centers: ");
    for (Map.Entry<String, SetHostStatWithPort> dc : dcs.entrySet()) {
        out.print('\t' + dc.getKey());
        // Group token stats by endpoint so each node is counted exactly once.
        ArrayListMultimap<InetAddressAndPort, HostStatWithPort> hostToTokens = ArrayListMultimap.create();
        for (HostStatWithPort stat : dc.getValue()) hostToTokens.put(stat.endpointWithPort, stat);
        int totalNodes = 0;
        int downNodes = 0;
        for (InetAddressAndPort endpoint : hostToTokens.keySet()) {
            totalNodes++;
            if (unreachableNodes.contains(endpoint.getHostAddressAndPort()))
                downNodes++;
        }
        out.print(" #Nodes: " + totalNodes);
        out.println(" #Down: " + downNodes);
    }
    out.println("\nDatabase versions:");
    Map<String, List<String>> databaseVersions = probe.getGossProxy().getReleaseVersionsWithPort();
    for (Map.Entry<String, List<String>> entry : databaseVersions.entrySet()) {
        out.printf("\t%s: %s%n%n", entry.getKey(), entry.getValue());
    }
    out.println("Keyspaces:");
    for (String keyspaceName : keyspaces) {
        String replicationInfo = probe.getKeyspaceReplicationInfo(keyspaceName);
        if (replicationInfo == null) {
            out.println("something went wrong for keyspace: " + keyspaceName);
        }
        out.printf("\t%s -> Replication class: %s%n", keyspaceName, replicationInfo);
    }
    if (errors.length() != 0) {
        // Print the accumulated notes verbatim; never pass them to printf as a
        // format string, since messages may legitimately contain '%'.
        out.println();
        out.print(errors);
    }
}
302425.761895cassandra
/**
 * Runs partition sampling against the given node: either a one-shot sample, a
 * scheduled (repeating) sample, stopping an existing schedule, or listing the
 * currently scheduled sampling tasks.
 *
 * BUG FIX: when listing scheduled tasks, the TABLE column width
 * ({@code maxTblLength}) was never widened for long table names, so the
 * printf column layout broke for any table name longer than "TABLE"; it is now
 * tracked symmetrically with the keyspace column.
 *
 * @param probe JMX connection to the node being sampled
 */
public void execute(NodeProbe probe) {
    // Positional args: [keyspace table/* duration] | [keyspace table/*] | [duration] | none.
    checkArgument(args.size() == 3 || args.size() == 2 || args.size() == 1 || args.size() == 0, "Invalid arguments, either [keyspace table/* duration] or [keyspace table/*] or [duration] or no args.\n" + "Optionally, use * to represent all tables under the keyspace.");
    checkArgument(topCount > 0, "TopK count (-k) option must have positive value");
    checkArgument(topCount < capacity, "TopK count (-k) option must be smaller then the summary capacity (-s)");
    checkArgument(capacity <= 1024, "Capacity (-s) cannot exceed 1024.");
    String keyspace = null;
    String table = null;
    int durationMillis = 10000;
    if (args.size() == 3) {
        keyspace = args.get(0);
        table = args.get(1);
        durationMillis = Integer.parseInt(args.get(2));
    } else if (args.size() == 2) {
        keyspace = args.get(0);
        table = args.get(1);
    } else if (args.size() == 1) {
        durationMillis = Integer.parseInt(args.get(0));
    }
    // '*' means "all"; the probe API represents that as null.
    keyspace = nullifyWildcard(keyspace);
    table = nullifyWildcard(table);
    checkArgument(durationMillis > 0, "Duration: %s must be positive", durationMillis);
    checkArgument(!hasInterval() || intervalMillis >= durationMillis, "Invalid scheduled sampling interval. Expecting interval >= duration, but interval: %s ms; duration: %s ms", intervalMillis, durationMillis);
    // Validate the requested sampler names against the SamplerType enum.
    List<String> targets = Lists.newArrayList();
    Set<String> available = Arrays.stream(SamplerType.values()).map(Enum::toString).collect(Collectors.toSet());
    for (String s : samplers.split(",")) {
        String sampler = s.trim().toUpperCase();
        checkArgument(available.contains(sampler), String.format("'%s' sampler is not available from: %s", s, Arrays.toString(SamplerType.values())));
        targets.add(sampler);
    }
    PrintStream out = probe.output().out;
    Map<String, List<CompositeData>> results;
    try {
        if (hasInterval() || shouldStop) {
            // Start or stop a scheduled (repeating) sampling job.
            boolean opSuccess = probe.handleScheduledSampling(keyspace, table, capacity, topCount, durationMillis, intervalMillis, targets, shouldStop);
            if (!opSuccess) {
                if (shouldStop)
                    out.printf("Unable to stop the non-existent scheduled sampling for keyspace: %s, table: %s%n", keyspace, table);
                else
                    out.printf("Unable to schedule sampling for keyspace: %s, table: %s due to existing samplings. " + "Stop the existing sampling jobs first.%n", keyspace, table);
            }
            return;
        } else if (shouldList) {
            // Render currently scheduled tasks as a two-column KEYSPACE/TABLE table,
            // sizing each column to its widest entry.
            List<Pair<String, String>> sampleTasks = new ArrayList<>();
            int maxKsLength = "KEYSPACE".length();
            int maxTblLength = "TABLE".length();
            for (String fullTableName : probe.getSampleTasks()) {
                String[] parts = fullTableName.split("\\.");
                checkState(parts.length == 2, "Unable to parse the full table name: %s", fullTableName);
                sampleTasks.add(Pair.create(parts[0], parts[1]));
                maxKsLength = Math.max(maxKsLength, parts[0].length());
                // BUG FIX: widen the table column too; it was previously left at
                // "TABLE".length(), breaking alignment for longer table names.
                maxTblLength = Math.max(maxTblLength, parts[1].length());
            }
            String lineFormat = "%" + maxKsLength + "s %" + maxTblLength + "s%n";
            out.printf(lineFormat, "KEYSPACE", "TABLE");
            sampleTasks.forEach(pair -> out.printf(lineFormat, pair.left, pair.right));
            return;
        } else {
            // One-shot sampling; the keyspace-only overload samples all tables.
            if (keyspace == null || table == null)
                results = probe.getPartitionSample(keyspace, capacity, durationMillis, topCount, targets);
            else
                results = probe.getPartitionSample(keyspace, table, capacity, durationMillis, topCount, targets);
        }
    } catch (OpenDataException e) {
        throw new RuntimeException(e);
    }
    AtomicBoolean first = new AtomicBoolean(true);
    SamplingManager.ResultBuilder rb = new SamplingManager.ResultBuilder(first, results, targets);
    out.println(SamplingManager.formatResult(rb));
}
303082.459107cassandra
/**
 * Verifies streaming byte/sstable metrics on a 3-node cluster where node 1 has
 * all the data and nodes 2 and 3 each miss a disjoint subset (created by
 * dropping mutations to one of them at a time). Data is then redistributed
 * either via full repair or via rebuild, and the sent/received metrics are
 * checked from every node's perspective.
 *
 * NOTE(review): relies on helpers defined elsewhere in this class
 * (checkDataSent, checkDataReceived, checkTotalDataSent, checkTotalDataReceived,
 * getNumberOfSSTables, checkThatNoStreamingOccuredBetweenTheThreeNodes) whose
 * exact semantics are not visible here — the metric expectations below assume
 * their documented behavior; confirm against their definitions.
 *
 * @param useRepair true to redistribute via "repair --full", false via "rebuild"
 */
public void testMetricsWithStreamingToTwoNodes(boolean useRepair) throws Exception {
    // 3-node cluster, entire-sstable streaming and hints disabled so every
    // transfer goes through the regular (metered) streaming path.
    try (Cluster cluster = init(Cluster.build(3).withDataDirCount(1).withConfig(config -> config.with(NETWORK, GOSSIP).set("stream_entire_sstables", false).set("hinted_handoff_enabled", false)).start(), 2)) {
        Stream.of(1, 2, 3).map(cluster::get).forEach(i -> i.runOnInstance(() -> SystemKeyspace.forceBlockingFlush(SystemKeyspace.LOCAL)));
        cluster.schemaChange(String.format("CREATE TABLE %s.cf (k text, c1 text, c2 text, PRIMARY KEY (k)) WITH compaction = {'class': '%s', 'enabled': 'false'}", KEYSPACE, "LeveledCompactionStrategy"));
        final int rowsPerFile = 500;
        final int files = 5;
        // Compaction would merge the per-flush sstables and break the counts below.
        cluster.get(1).nodetool("disableautocompaction", KEYSPACE);
        cluster.get(2).nodetool("disableautocompaction", KEYSPACE);
        cluster.get(3).nodetool("disableautocompaction", KEYSPACE);
        // Phase 1: drop writes to node 3, so the first 3 files land only on 1 and 2.
        IMessageFilters.Filter drop1to3 = cluster.filters().verbs(MUTATION_REQ.id).from(1).to(3).drop();
        int sstablesInitiallyOnNode2 = 0;
        int sstablesInitiallyOnNode3 = 0;
        for (int k = 0; k < 3; k++) {
            for (int i = k * rowsPerFile; i < k * rowsPerFile + rowsPerFile; ++i) {
                cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.cf (k, c1, c2) VALUES (?, 'value1', 'value2');"), ConsistencyLevel.ONE, Integer.toString(i));
            }
            // One flush per batch => one sstable per batch on each receiving node.
            cluster.get(1).flush(KEYSPACE);
            cluster.get(2).flush(KEYSPACE);
            sstablesInitiallyOnNode2++;
        }
        drop1to3.off();
        // Phase 2: drop writes to node 2, so the last 2 files land only on 1 and 3.
        IMessageFilters.Filter drop1to2 = cluster.filters().verbs(MUTATION_REQ.id).from(1).to(2).drop();
        for (int k = 3; k < files; k++) {
            for (int i = k * rowsPerFile; i < k * rowsPerFile + rowsPerFile; ++i) {
                cluster.coordinator(1).execute(withKeyspace("INSERT INTO %s.cf (k, c1, c2) VALUES (?, 'value1', 'value2');"), ConsistencyLevel.ONE, Integer.toString(i));
            }
            cluster.get(1).flush(KEYSPACE);
            cluster.get(3).flush(KEYSPACE);
            sstablesInitiallyOnNode3++;
        }
        drop1to2.off();
        // Baseline: no streaming may have happened during the write phase.
        checkThatNoStreamingOccuredBetweenTheThreeNodes(cluster);
        long bytesFrom2To1;
        int sstablesFrom2To1;
        long bytesFrom3To1;
        int sstablesFrom3To1;
        int sstablesFrom3To2;
        int sstablesFrom2To3;
        assertThat(sstablesInitiallyOnNode2).isEqualTo(getNumberOfSSTables(cluster, 2));
        assertThat(sstablesInitiallyOnNode3).isEqualTo(getNumberOfSSTables(cluster, 3));
        if (useRepair) {
            // Repair streams in both directions: each node both sends its unique
            // sstables and receives the ones it is missing.
            cluster.get(3).nodetool("repair", "--full");
            cluster.get(2).nodetool("repair", "--full");
            bytesFrom2To1 = checkDataSent(cluster, 2, 1);
            sstablesFrom2To1 = sstablesInitiallyOnNode2;
            bytesFrom3To1 = checkDataSent(cluster, 3, 1);
            sstablesFrom3To1 = sstablesInitiallyOnNode3;
            sstablesFrom2To3 = sstablesInitiallyOnNode2;
            sstablesFrom3To2 = sstablesInitiallyOnNode3;
        } else {
            // Rebuild only pulls data in; nodes 2 and 3 send nothing back to 1.
            // Node 2 rebuilds second, so it also receives node 2's files that node 3
            // picked up during its own rebuild — hence the summed expectation.
            cluster.get(3).nodetool("rebuild", "--keyspace", KEYSPACE);
            cluster.get(2).nodetool("rebuild", "--keyspace", KEYSPACE);
            bytesFrom2To1 = 0;
            sstablesFrom2To1 = 0;
            bytesFrom3To1 = 0;
            sstablesFrom3To1 = 0;
            sstablesFrom2To3 = sstablesInitiallyOnNode2;
            sstablesFrom3To2 = sstablesInitiallyOnNode3 + sstablesInitiallyOnNode2;
        }
        // Node 1's perspective: totals must equal the sum of its per-peer sends.
        long bytesFrom1To2 = checkDataSent(cluster, 1, 2);
        long bytesFrom1To3 = checkDataSent(cluster, 1, 3);
        long totalBytesSentFrom1 = bytesFrom1To2 + bytesFrom1To3;
        if (useRepair)
            checkTotalDataSent(cluster, 1, totalBytesSentFrom1, totalBytesSentFrom1, 10);
        else
            checkTotalDataSent(cluster, 1, totalBytesSentFrom1, 0, 0);
        checkDataReceived(cluster, 1, 2, bytesFrom2To1, sstablesFrom2To1);
        checkDataReceived(cluster, 1, 3, bytesFrom3To1, sstablesFrom3To1);
        checkTotalDataReceived(cluster, 1, bytesFrom2To1 + bytesFrom3To1);
        // Node 2's perspective.
        long bytesFrom2To3 = checkDataSent(cluster, 2, 3);
        long bytesFrom3To2 = checkDataSent(cluster, 3, 2);
        long totalBytesReceivedBy2 = bytesFrom1To2 + bytesFrom3To2;
        checkDataReceived(cluster, 2, 1, bytesFrom1To2, files);
        checkDataReceived(cluster, 2, 3, bytesFrom3To2, sstablesFrom3To2);
        if (useRepair)
            checkTotalDataSent(cluster, 2, bytesFrom2To3 + bytesFrom2To1, bytesFrom2To3 + bytesFrom2To1, sstablesFrom2To3 + sstablesFrom2To1);
        else
            checkTotalDataSent(cluster, 2, bytesFrom2To3, 0, 0);
        checkTotalDataReceived(cluster, 2, totalBytesReceivedBy2);
        // Node 3's perspective.
        long totalBytesReceivedBy3 = bytesFrom1To3 + bytesFrom2To3;
        checkDataReceived(cluster, 3, 1, bytesFrom1To3, files);
        checkDataReceived(cluster, 3, 2, bytesFrom2To3, sstablesFrom2To3);
        if (useRepair)
            checkTotalDataSent(cluster, 3, bytesFrom3To2 + bytesFrom3To1, bytesFrom3To2 + bytesFrom3To1, sstablesFrom3To2 + sstablesFrom3To1);
        else
            checkTotalDataSent(cluster, 3, bytesFrom3To2, 0, 0);
        checkTotalDataReceived(cluster, 3, totalBytesReceivedBy3);
    }
}
302132.9914102cassandra
/**
 * Stress-tests ApproximateClock: several threads draw logical timestamps (lts)
 * and resolve them to real timestamps (rts) — some immediately, some on a
 * delayed scheduler — recording both directions in concurrent maps. Afterwards
 * it asserts that the lts→rts mapping is a bijection, order-preserving, and
 * stable when re-queried through rts(lts) / lts(rts).
 *
 * BUG FIX: worker threads previously called {@code signalError.signalAll()}
 * WITHOUT holding the lock that owns the condition. Per the
 * java.util.concurrent.locks.Condition contract that throws
 * IllegalMonitorStateException, masking the original failure. All signal sites
 * now acquire the lock first (the main thread's await() releases it, so the
 * workers can take it).
 *
 * @throws InterruptedException if interrupted while awaiting executor shutdown
 */
public void approximateClockTest() throws InterruptedException {
    ConcurrentHashMap<Long, Long> m = new ConcurrentHashMap<>();
    ConcurrentHashMap<Long, Long> inverse = new ConcurrentHashMap<>();
    TimeUnit timeUnit = TimeUnit.MILLISECONDS;
    int duration = 1000;
    int concurrency = 5;
    // Throttle each worker so all threads together stay within one clock period.
    long maxTicks = timeUnit.toMicros(duration) / (4 * concurrency);
    ApproximateClock clock = new ApproximateClock(duration, timeUnit);
    ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(1);
    ExecutorService executor = Executors.newFixedThreadPool(concurrency);
    final Lock lock = new ReentrantLock();
    AtomicReference<Throwable> throwable = new AtomicReference<>();
    final Condition signalError = lock.newCondition();
    // Main thread holds the lock while workers run; await() below releases it.
    lock.lock();
    for (int i = 0; i < concurrency; i++) {
        executor.submit(() -> {
            try {
                int sleepCnt = 0;
                while (!executor.isShutdown() && !Thread.currentThread().isInterrupted()) {
                    sleepCnt++;
                    if (sleepCnt >= maxTicks) {
                        // Park for a full clock period to let the clock advance.
                        LockSupport.parkNanos(timeUnit.toNanos(duration));
                        sleepCnt = 0;
                    }
                    if (executor.isShutdown() || Thread.currentThread().isInterrupted())
                        return;
                    long lts = clock.nextLts();
                    if (lts % 10000 == 0) {
                        // Occasionally resolve the lts later, after the clock has
                        // moved on, to exercise historical lookups.
                        scheduledExecutor.schedule(() -> {
                            try {
                                long rts = clock.rts(lts);
                                // Both maps must never see a duplicate key: lts→rts is a bijection.
                                Assert.assertNull(m.put(lts, rts));
                                Assert.assertNull(inverse.put(rts, lts));
                            } catch (Throwable t) {
                                throwable.set(t);
                                // Signal under the lock, as the Condition contract requires.
                                lock.lock();
                                try {
                                    signalError.signalAll();
                                } finally {
                                    lock.unlock();
                                }
                                t.printStackTrace();
                            }
                        }, 2 * duration, timeUnit);
                        continue;
                    }
                    try {
                        long rts = clock.rts(lts);
                        Assert.assertNull(m.put(lts, rts));
                        Assert.assertNull(inverse.put(rts, lts));
                    } catch (Throwable t) {
                        throwable.set(t);
                        lock.lock();
                        try {
                            signalError.signalAll();
                        } finally {
                            lock.unlock();
                        }
                    }
                }
            } catch (Throwable t) {
                throwable.set(t);
                lock.lock();
                try {
                    signalError.signalAll();
                } finally {
                    lock.unlock();
                }
                t.printStackTrace();
            }
        });
    }
    // Run for up to 10s, or return early if a worker signals an error.
    signalError.await(10, TimeUnit.SECONDS);
    lock.unlock();
    executor.shutdown();
    Assert.assertTrue(executor.awaitTermination(30, TimeUnit.SECONDS));
    scheduledExecutor.shutdown();
    Assert.assertTrue(scheduledExecutor.awaitTermination(10, TimeUnit.SECONDS));
    Throwable t = throwable.get();
    if (t != null)
        throw new AssertionError("Caught an exception while executing", t);
    // Bijection check: same number of entries both ways.
    Assert.assertEquals(m.size(), inverse.size());
    // Walk adjacent entries (in map iteration order): relative key order must
    // match relative value order, and both rts(lts)/lts(rts) must round-trip.
    // NOTE: assumes the run produced at least one entry; iter.next() would
    // otherwise throw, which is the desired failure mode for an idle clock.
    Iterator<Map.Entry<Long, Long>> iter = m.entrySet().iterator();
    Map.Entry<Long, Long> previous = iter.next();
    while (iter.hasNext()) {
        Map.Entry<Long, Long> current = iter.next();
        long lts = current.getKey();
        long rts = current.getValue();
        Assert.assertEquals(String.format("%s and %s sort wrong", previous, current), Long.compare(previous.getKey(), current.getKey()), Long.compare(previous.getValue(), current.getValue()));
        Assert.assertEquals(clock.rts(lts), rts);
        Assert.assertEquals(clock.lts(rts), lts);
        previous = current;
    }
}
301975.2314106cassandra
/**
 * Bisection driver for a failing Harry run: starting from a reproducing
 * configuration, it tries to grow the sets of skippable partitions (pds) and
 * logical timestamps (lts) — a candidate stays in the skip set only if the run
 * still fails without it. Progress is reported on stdout after every attempt.
 *
 * BUG FIX: the method previously did {@code System.exit(1)} unconditionally in
 * the finally block, reporting failure to any calling harness even on a clean
 * run. It now exits 0 on success and 1 only when a Throwable escaped. The
 * forced exit itself is kept — presumably to terminate lingering non-daemon
 * cluster threads (TODO confirm).
 *
 * @param args command-line arguments; args[0] is expected to point at the
 *             Harry YAML configuration (resolved by HarryRunner.loadConfig)
 */
public static void main(String[] args) throws Throwable {
    int exitCode = 0;
    try {
        File configFile = HarryRunner.loadConfig(args);
        Configuration configuration = Configuration.fromFile(configFile);
        System.out.println(Configuration.toYamlString(configuration));
        Set<Long> pdsToSkip = new HashSet<>();
        Set<Long> ltsToSkip = new HashSet<>();
        final long maxLts = 7000L;
        // Predicate deciding whether a caught exception is "the" failure being
        // bisected; currently accepts any exception.
        Predicate<Exception> check = (e) -> true;
        Run run = configuration.createRun();
        Configuration.SequentialRunnerConfig config = (Configuration.SequentialRunnerConfig) configuration.runner;
        // Wrap every LtsVisitor so that skipped lts/pds are filtered out of the replay.
        List<Visitor> visitors = new ArrayList<>();
        for (Configuration.VisitorConfiguration factory : config.visitorFactories) {
            Visitor visitor = factory.make(run);
            if (visitor instanceof LtsVisitor) {
                AtomicLong counter = new AtomicLong();
                visitors.add(new SkippingVisitor((LtsVisitor) visitor, counter::getAndIncrement, (lts) -> run.pdSelector.pd(lts, run.schemaSpec), ltsToSkip, pdsToSkip));
            } else {
                visitors.add(visitor);
            }
        }
        // Phase 1: try to skip whole partitions. A pd stays in pdsToSkip only if
        // the run still reproduces the failure without it.
        Set<Long> partitions = new HashSet<>();
        for (long i = 0; i < maxLts; i++) partitions.add(run.pdSelector.pd(i, run.schemaSpec));
        for (Long pdToCheck : partitions) {
            if (pdsToSkip.contains(pdToCheck))
                continue;
            pdsToSkip.add(pdToCheck);
            Runner.init(configuration, run);
            try {
                runOnce(visitors, maxLts);
                // Run passed => this pd was necessary for the repro; keep it.
                System.out.println("Can not skip " + pdToCheck + "\nCan only skip these: " + toString(pdsToSkip));
                pdsToSkip.remove(pdToCheck);
            } catch (RuntimeException t) {
                if (check.test(t)) {
                    System.out.printf("Safe to skip: %d because without it we're still hitting an exception %s.\n%s\n", pdToCheck, t.getMessage(), toString(pdsToSkip));
                } else {
                    System.out.println("Can not skip " + pdToCheck + "\n, since we seem to repro a different issue. Can only skip these: " + toString(pdsToSkip));
                    pdsToSkip.remove(pdToCheck);
                }
            }
            // Fresh keyspace for the next attempt.
            run.sut.schemaChange("DROP KEYSPACE " + run.schemaSpec.keyspace);
        }
        // Phase 2: try to skip individual lts (in partitions not already skipped).
        for (long lts = 0; lts < maxLts; lts++) {
            long ltsToCheck = lts;
            if (ltsToSkip.contains(ltsToCheck) || pdsToSkip.contains(run.pdSelector.pd(lts, run.schemaSpec)))
                continue;
            ltsToSkip.add(ltsToCheck);
            Runner.init(configuration, run);
            try {
                runOnce(visitors, maxLts);
                System.out.println("Can not skip " + ltsToCheck + "\nCan only skip these: " + toString(ltsToSkip));
                ltsToSkip.remove(ltsToCheck);
            } catch (RuntimeException t) {
                if (check.test(t)) {
                    System.out.printf("Safe to skip: %d because without it we're still hitting an exception %s.\n%s\n", ltsToCheck, t.getMessage(), toString(ltsToSkip));
                } else {
                    System.out.println("Can not skip " + ltsToCheck + "\n, since we seem to repro a different issue. Can only skip these: " + toString(ltsToSkip));
                    ltsToSkip.remove(ltsToCheck);
                }
            }
            run.sut.schemaChange("DROP KEYSPACE " + run.schemaSpec.keyspace);
        }
    } catch (Throwable t) {
        exitCode = 1;
        System.out.println(t.getMessage());
        t.printStackTrace();
    } finally {
        System.exit(exitCode);
    }
}
302226.091133cassandra
/**
 * Verifies CAST() in the selection clause across all numeric CQL types, plus
 * casts to ascii/text. A single row is inserted with every numeric column set
 * except {@code i} (left null), so each assertion also checks that casting a
 * null yields null. The first assertion additionally pins the generated column
 * names: casting to the column's own type keeps the plain name ("c"), while a
 * real cast produces "cast(x as type)".
 *
 * NOTE(review): expected values encode the project's cast semantics — e.g.
 * float 5.2 cast to int truncates to 5, and decimal/varint casts preserve the
 * printed form shown — as exercised here; see the CastFcts implementation for
 * the authoritative rules.
 */
public void testNumericCastsInSelectionClause() throws Throwable {
    // Column i is intentionally never written, so every cast of i must return null.
    createTable("CREATE TABLE %s (a tinyint primary key," + " b smallint," + " c int," + " d bigint," + " e float," + " f double," + " g decimal," + " h varint," + " i int)");
    execute("INSERT INTO %s (a, b, c, d, e, f, g, h) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", (byte) 1, (short) 2, 3, 4L, 5.2F, 6.3, BigDecimal.valueOf(6.3), BigInteger.valueOf(4));
    // Column naming: a no-op cast (int c to int) keeps the bare column name.
    assertColumnNames(execute("SELECT CAST(b AS int), CAST(c AS int), CAST(d AS double) FROM %s"), "cast(b as int)", "c", "cast(d as double)");
    // One assertion per target type, casting every source column at once.
    assertRows(execute("SELECT CAST(a AS tinyint), " + "CAST(b AS tinyint), " + "CAST(c AS tinyint), " + "CAST(d AS tinyint), " + "CAST(e AS tinyint), " + "CAST(f AS tinyint), " + "CAST(g AS tinyint), " + "CAST(h AS tinyint), " + "CAST(i AS tinyint) FROM %s"), row((byte) 1, (byte) 2, (byte) 3, (byte) 4L, (byte) 5, (byte) 6, (byte) 6, (byte) 4, null));
    assertRows(execute("SELECT CAST(a AS smallint), " + "CAST(b AS smallint), " + "CAST(c AS smallint), " + "CAST(d AS smallint), " + "CAST(e AS smallint), " + "CAST(f AS smallint), " + "CAST(g AS smallint), " + "CAST(h AS smallint), " + "CAST(i AS smallint) FROM %s"), row((short) 1, (short) 2, (short) 3, (short) 4L, (short) 5, (short) 6, (short) 6, (short) 4, null));
    assertRows(execute("SELECT CAST(a AS int), " + "CAST(b AS int), " + "CAST(c AS int), " + "CAST(d AS int), " + "CAST(e AS int), " + "CAST(f AS int), " + "CAST(g AS int), " + "CAST(h AS int), " + "CAST(i AS int) FROM %s"), row(1, 2, 3, 4, 5, 6, 6, 4, null));
    assertRows(execute("SELECT CAST(a AS bigint), " + "CAST(b AS bigint), " + "CAST(c AS bigint), " + "CAST(d AS bigint), " + "CAST(e AS bigint), " + "CAST(f AS bigint), " + "CAST(g AS bigint), " + "CAST(h AS bigint), " + "CAST(i AS bigint) FROM %s"), row(1L, 2L, 3L, 4L, 5L, 6L, 6L, 4L, null));
    assertRows(execute("SELECT CAST(a AS float), " + "CAST(b AS float), " + "CAST(c AS float), " + "CAST(d AS float), " + "CAST(e AS float), " + "CAST(f AS float), " + "CAST(g AS float), " + "CAST(h AS float), " + "CAST(i AS float) FROM %s"), row(1.0F, 2.0F, 3.0F, 4.0F, 5.2F, 6.3F, 6.3F, 4.0F, null));
    assertRows(execute("SELECT CAST(a AS double), " + "CAST(b AS double), " + "CAST(c AS double), " + "CAST(d AS double), " + "CAST(e AS double), " + "CAST(f AS double), " + "CAST(g AS double), " + "CAST(h AS double), " + "CAST(i AS double) FROM %s"), row(1.0, 2.0, 3.0, 4.0, (double) 5.2F, 6.3, 6.3, 4.0, null));
    assertRows(execute("SELECT CAST(a AS decimal), " + "CAST(b AS decimal), " + "CAST(c AS decimal), " + "CAST(d AS decimal), " + "CAST(e AS decimal), " + "CAST(f AS decimal), " + "CAST(g AS decimal), " + "CAST(h AS decimal), " + "CAST(i AS decimal) FROM %s"), row(BigDecimal.valueOf(1), BigDecimal.valueOf(2), BigDecimal.valueOf(3), BigDecimal.valueOf(4), new BigDecimal("5.2"), BigDecimal.valueOf(6.3), BigDecimal.valueOf(6.3), BigDecimal.valueOf(4), null));
    assertRows(execute("SELECT CAST(a AS ascii), " + "CAST(b AS ascii), " + "CAST(c AS ascii), " + "CAST(d AS ascii), " + "CAST(e AS ascii), " + "CAST(f AS ascii), " + "CAST(g AS ascii), " + "CAST(h AS ascii), " + "CAST(i AS ascii) FROM %s"), row("1", "2", "3", "4", "5.2", "6.3", "6.3", "4", null));
    assertRows(execute("SELECT CAST(a AS text), " + "CAST(b AS text), " + "CAST(c AS text), " + "CAST(d AS text), " + "CAST(e AS text), " + "CAST(f AS text), " + "CAST(g AS text), " + "CAST(h AS text), " + "CAST(i AS text) FROM %s"), row("1", "2", "3", "4", "5.2", "6.3", "6.3", "4", null));
}
304473.26296cassandra
/**
 * Exhaustively exercises a vector-similarity CQL function (the instance fields
 * {@code function} and {@code luceneFunction} select which one — presumably
 * this test class is parameterized per similarity function; confirm against
 * the class setup). Covers: valid column/literal/bind-marker argument
 * combinations, type-inference failures on untyped markers, rejection of
 * non-float-vector argument types, dimension mismatches, null handling, the
 * all-zero-vector special case for COSINE, and non-numeric element errors.
 */
public void testVectorSimilarityFunction() {
    // Columns cover the supported type (vector<float, 2>) plus every
    // deliberately-wrong type used in the negative cases below.
    createTable(KEYSPACE, "CREATE TABLE %s (pk int PRIMARY KEY, value vector<float, 2>, " + "l list<float>, " + "fl frozen<list<float>>, " + "v1 vector<float, 1>, " + "v_int vector<int, 2>, " + "v_double vector<double, 2>)");
    float[] values = new float[] { 1f, 2f };
    CQLTester.Vector<Float> vector = vector(ArrayUtils.toObject(values));
    // Expected result: whatever Lucene computes for the vector against itself.
    Object[] similarity = row(luceneFunction.compare(values, values));
    execute("INSERT INTO %s (pk, value, l, fl, v1, v_int, v_double) VALUES (0, ?, ?, ?, ?, ?, ?)", vector, list(1f, 2f), list(1f, 2f), vector(1f), vector(1, 2), vector(1d, 2d));
    // Positive cases: column/literal combinations.
    assertRows(execute("SELECT " + function + "(value, value) FROM %s"), similarity);
    assertRows(execute("SELECT " + function + "(value, [1, 2]) FROM %s"), similarity);
    assertRows(execute("SELECT " + function + "([1, 2], value) FROM %s"), similarity);
    assertRows(execute("SELECT " + function + "([1, 2], [1, 2]) FROM %s"), similarity);
    // Bind markers: type is inferred from the other argument; two bare markers
    // cannot be inferred and require explicit casts.
    assertRows(execute("SELECT " + function + "(value, ?) FROM %s", vector), similarity);
    assertRows(execute("SELECT " + function + "(?, value) FROM %s", vector), similarity);
    assertThatThrownBy(() -> execute("SELECT " + function + "(?, ?) FROM %s", vector, vector)).hasMessageContaining("Cannot infer type of argument ?");
    assertRows(execute("SELECT " + function + "((vector<float, 2>) ?, ?) FROM %s", vector, vector), similarity);
    assertRows(execute("SELECT " + function + "(?, (vector<float, 2>) ?) FROM %s", vector, vector), similarity);
    assertRows(execute("SELECT " + function + "((vector<float, 2>) ?, (vector<float, 2>) ?) FROM %s", vector, vector), similarity);
    assertRows(execute("SELECT " + function + "([1, 2], ?) FROM %s", vector), similarity);
    assertRows(execute("SELECT " + function + "(?, [1, 2]) FROM %s", vector), similarity);
    assertRows(execute("SELECT " + function + "([1, 2], ?) FROM %s", vector), similarity);
    // Negative cases: list / frozen-list / non-float-vector arguments are rejected.
    assertThatThrownBy(() -> execute("SELECT " + function + "(l, value) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument l of type list<float>");
    assertThatThrownBy(() -> execute("SELECT " + function + "(fl, value) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument fl of type frozen<list<float>>");
    assertThatThrownBy(() -> execute("SELECT " + function + "(value, l) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument l of type list<float>");
    assertThatThrownBy(() -> execute("SELECT " + function + "(value, fl) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument fl of type frozen<list<float>>");
    assertThatThrownBy(() -> execute("SELECT " + function + "(l, [1, 2]) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument l of type list<float>");
    assertThatThrownBy(() -> execute("SELECT " + function + "(fl, [1, 2]) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument fl of type frozen<list<float>>");
    assertThatThrownBy(() -> execute("SELECT " + function + "([1, 2], l) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument l of type list<float>");
    assertThatThrownBy(() -> execute("SELECT " + function + "([1, 2], fl) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument fl of type frozen<list<float>>");
    assertThatThrownBy(() -> execute("SELECT " + function + "((List<Float>)[1, 2], [3, 4]) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument (list<float>)[1, 2] of type frozen<list<float>>");
    assertThatThrownBy(() -> execute("SELECT " + function + "((List<Float>)[1, 2], (List<Float>)[3, 4]) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument (list<float>)[1, 2] of type frozen<list<float>>");
    assertThatThrownBy(() -> execute("SELECT " + function + "([1, 2], (List<Float>)[3, 4]) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument (list<float>)[3, 4] of type frozen<list<float>>");
    assertThatThrownBy(() -> execute("SELECT " + function + "(v_int, [1, 2]) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument v_int of type vector<int, 2>");
    assertThatThrownBy(() -> execute("SELECT " + function + "(v_double, [1, 2]) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument v_double of type vector<double, 2>");
    assertThatThrownBy(() -> execute("SELECT " + function + "([1, 2], v_int) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument v_int of type vector<int, 2>");
    assertThatThrownBy(() -> execute("SELECT " + function + "([1, 2], v_double) FROM %s")).hasMessageContaining("Function " + function + " requires a float vector argument, but found argument v_double of type vector<double, 2>");
    // Dimension mismatches between the two arguments are rejected.
    assertThatThrownBy(() -> execute("SELECT " + function + "([1, 2], [3]) FROM %s", vector(1))).hasMessageContaining("All arguments must have the same vector dimensions");
    assertThatThrownBy(() -> execute("SELECT " + function + "(value, [1]) FROM %s", vector(1))).hasMessageContaining("All arguments must have the same vector dimensions");
    assertThatThrownBy(() -> execute("SELECT " + function + "([1], value) FROM %s", vector(1))).hasMessageContaining("All arguments must have the same vector dimensions");
    assertThatThrownBy(() -> execute("SELECT " + function + "((vector<float, 1>) ?, value) FROM %s", vector(1))).hasMessageContaining("All arguments must have the same vector dimensions");
    assertThatThrownBy(() -> execute("SELECT " + function + "(value, (vector<float, 1>) ?) FROM %s", vector(1))).hasMessageContaining("All arguments must have the same vector dimensions");
    assertThatThrownBy(() -> execute("SELECT " + function + "((vector<float, 2>) ?, (vector<float, 1>) ?) FROM %s", vector(1, 2), vector(1))).hasMessageContaining("All arguments must have the same vector dimensions");
    assertThatThrownBy(() -> execute("SELECT " + function + "(value, v1) FROM %s")).hasMessageContaining("All arguments must have the same vector dimensions");
    assertThatThrownBy(() -> execute("SELECT " + function + "(v1, value) FROM %s")).hasMessageContaining("All arguments must have the same vector dimensions");
    // Null handling: a null argument yields a null result; two untyped nulls
    // fail type inference.
    assertRows(execute("SELECT " + function + "(value, null) FROM %s"), row((Float) null));
    assertRows(execute("SELECT " + function + "(null, value) FROM %s"), row((Float) null));
    assertThatThrownBy(() -> execute("SELECT " + function + "(null, null) FROM %s")).hasMessageContaining("Cannot infer type of argument NULL in call to function " + function);
    assertRows(execute("SELECT " + function + "(value, ?) FROM %s", (CQLTester.Vector<Float>) null), row((Float) null));
    assertRows(execute("SELECT " + function + "(?, value) FROM %s", (CQLTester.Vector<Float>) null), row((Float) null));
    assertThatThrownBy(() -> execute("SELECT " + function + "(?, ?) FROM %s", null, null)).hasMessageContaining("Cannot infer type of argument ? in call to function " + function);
    // All-zero vectors: COSINE rejects them (undefined), other functions compute.
    if (luceneFunction == VectorSimilarityFunction.COSINE) {
        String expected = "Function " + function + " doesn't support all-zero vectors";
        assertThatThrownBy(() -> execute("SELECT " + function + "(value, [0, 0]) FROM %s")).hasMessageContaining(expected);
        assertThatThrownBy(() -> execute("SELECT " + function + "([0, 0], value) FROM %s")).hasMessageContaining(expected);
    } else {
        float expected = luceneFunction.compare(values, new float[] { 0, 0 });
        assertRows(execute("SELECT " + function + "(value, [0, 0]) FROM %s"), row(expected));
        assertRows(execute("SELECT " + function + "([0, 0], value) FROM %s"), row(expected));
    }
    // Non-numeric vector elements are a type error at either argument position.
    assertThatThrownBy(() -> execute("SELECT " + function + "(value, ['a', 'b']) FROM %s WHERE pk=0")).hasMessageContaining("Type error: ['a', 'b'] cannot be passed as argument 1");
    assertThatThrownBy(() -> execute("SELECT " + function + "(['a', 'b'], value) FROM %s WHERE pk=0")).hasMessageContaining("Type error: ['a', 'b'] cannot be passed as argument 0");
    assertThatThrownBy(() -> execute("SELECT " + function + "(['a', 'b'], ['a', 'b']) FROM %s WHERE pk=0")).hasMessageContaining("Type error: ['a', 'b'] cannot be passed as argument 0");
}
303476.621109cassandra
/**
 * Verifies that multi-column slice restrictions on a table whose first two clustering
 * columns sort ASC and whose last two sort DESC are translated into the expected
 * {@code Slices}. With mixed clustering orders a single logical restriction cannot be
 * expressed as one contiguous slice, so each scenario asserts both the number of
 * physical slices produced and the exact start/end bounds of each.
 */
public void testBoundsAsClusteringWithMultiSliceRestrictionsWithTwoAscendingAndTwoDescendingClusteringColumns() {
    TableMetadata tableMetadata = newTableMetadata(Sort.ASC, Sort.ASC, Sort.DESC, Sort.DESC);
    ByteBuffer value1 = ByteBufferUtil.bytes(1);
    ByteBuffer value2 = ByteBufferUtil.bytes(2);
    ByteBuffer value3 = ByteBufferUtil.bytes(3);
    ByteBuffer value4 = ByteBufferUtil.bytes(4);
    // Scenario: (c1, c2, c3, c4) > (1, 2, 3, 4) -> split into 2 slices.
    Restriction restriction = newMultiSlice(tableMetadata, 0, Operator.GT, value1, value2, value3, value4);
    ClusteringColumnRestrictions restrictions = restrictions(tableMetadata, restriction);
    Slices slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(2, slices.size());
    Slice slice = slices.get(0);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, false, value1, value2, value3, value4);
    slice = slices.get(1);
    assertStartBound(slice, false, value1, value2);
    assertEmptyEnd(slice);
    // Scenario: c1 = 1 AND (c2, c3, c4) > (2, 3, 4) -> 2 slices within the c1 = 1 partition of the clustering space.
    Restriction eq = newSingleRestriction(tableMetadata, 0, Operator.EQ, value1);
    restriction = newMultiSlice(tableMetadata, 1, Operator.GT, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction, eq);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(2, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, false, value1, value2, value3, value4);
    slice = slices.get(1);
    assertStartBound(slice, false, value1, value2);
    assertEndBound(slice, true, value1);
    // Scenario: c1 IN (1, 2) AND (c2, c3, c4) > (2, 3, 4) -> the 2-slice pattern above, once per IN value.
    Restriction in = newSingleRestriction(tableMetadata, 0, Operator.IN, value1, value2);
    restriction = newMultiSlice(tableMetadata, 1, Operator.GT, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction, in);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(4, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, false, value1, value2, value3, value4);
    slice = slices.get(1);
    assertStartBound(slice, false, value1, value2);
    assertEndBound(slice, true, value1);
    slice = slices.get(2);
    assertStartBound(slice, true, value2, value2);
    assertEndBound(slice, false, value2, value2, value3, value4);
    slice = slices.get(3);
    assertStartBound(slice, false, value2, value2);
    assertEndBound(slice, true, value2);
    // Scenario: (c1, c2) >= (1, 2) -- only ASC columns involved, so a single slice suffices.
    restriction = newMultiSlice(tableMetadata, 0, Operator.GTE, value1, value2);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(1, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1, value2);
    assertEmptyEnd(slice);
    // Scenario: (c1, c2, c3, c4) >= (1, 2, 3, 4) -> 2 slices (inclusive variant of the first scenario).
    restriction = newMultiSlice(tableMetadata, 0, Operator.GTE, value1, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(2, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, true, value1, value2, value3, value4);
    slice = slices.get(1);
    assertStartBound(slice, false, value1, value2);
    assertEmptyEnd(slice);
    // Scenario: (c1, c2, c3, c4) <= (1, 2, 3, 4) -> 2 slices.
    restriction = newMultiSlice(tableMetadata, 0, Operator.LTE, value1, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(2, slices.size());
    slice = slices.get(0);
    assertEmptyStart(slice);
    assertEndBound(slice, false, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, true, value1, value2, value3, value4);
    assertEndBound(slice, true, value1, value2);
    // Scenario: (c1, c2, c3, c4) < (1, 2, 3, 4) -> 2 slices (exclusive variant of the previous scenario).
    restriction = newMultiSlice(tableMetadata, 0, Operator.LT, value1, value2, value3, value4);
    restrictions = restrictions(tableMetadata, restriction);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(2, slices.size());
    slice = slices.get(0);
    assertEmptyStart(slice);
    assertEndBound(slice, false, value1, value2);
    slice = slices.get(1);
    assertStartBound(slice, false, value1, value2, value3, value4);
    assertEndBound(slice, true, value1, value2);
    // Scenario: (c1, c2, c3, c4) > (1, 2, 3, 4) AND (c1, c2) < (2, 3) -> 2 slices.
    restriction = newMultiSlice(tableMetadata, 0, Operator.GT, value1, value2, value3, value4);
    Restriction restriction2 = newMultiSlice(tableMetadata, 0, Operator.LT, value2, value3);
    restrictions = restrictions(tableMetadata, restriction, restriction2);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(2, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, false, value1, value2, value3, value4);
    slice = slices.get(1);
    assertStartBound(slice, false, value1, value2);
    assertEndBound(slice, false, value2, value3);
    // Scenario: (c1, c2, c3, c4) >= (1, 2, 3, 4) AND (c1, c2, c3, c4) <= (4, 3, 2, 1) -> 3 slices.
    restriction = newMultiSlice(tableMetadata, 0, Operator.GTE, value1, value2, value3, value4);
    restriction2 = newMultiSlice(tableMetadata, 0, Operator.LTE, value4, value3, value2, value1);
    restrictions = restrictions(tableMetadata, restriction, restriction2);
    slices = restrictions.slices(QueryOptions.DEFAULT);
    assertEquals(3, slices.size());
    slice = slices.get(0);
    assertStartBound(slice, true, value1, value2);
    assertEndBound(slice, true, value1, value2, value3, value4);
    slice = slices.get(1);
    assertStartBound(slice, false, value1, value2);
    assertEndBound(slice, false, value4, value3);
    slice = slices.get(2);
    assertStartBound(slice, true, value4, value3, value2, value1);
    assertEndBound(slice, true, value4, value3);
}
302538.471117cassandra
/**
 * Verifies Java UDFs over a user-defined type when the type later gains a field
 * via ALTER TYPE. Two parallel sets of functions are created: one set is re-issued
 * with CREATE OR REPLACE after the ALTER, the other is left untouched. Both sets
 * must return identical results against rows written before and after the ALTER.
 */
public void testJavaUserTypeAddFieldWithReplace() throws Throwable {
    String udtName = KEYSPACE + '.' + createType("CREATE TYPE %s (txt text, i int)");
    createTable("CREATE TABLE %s (key int primary key, udt frozen<" + udtName + ">)");
    // Functions that will be re-created with CREATE OR REPLACE after the ALTER TYPE.
    String replacedTxt = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + ") " + "RETURNS NULL ON NULL INPUT " + "RETURNS text " + "LANGUAGE java\n" + "AS $$return udt.getString(\"txt\");$$;");
    String replacedInt = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + " ) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS $$return Integer.valueOf(udt.getInt(\"i\"));$$;");
    String replacedAdded = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + " ) " + "CALLED ON NULL INPUT " + "RETURNS double " + "LANGUAGE java\n" + "AS $$return Double.valueOf(udt.getDouble(\"added\"));$$;");
    String replacedUdt = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + " ) " + "RETURNS NULL ON NULL INPUT " + "RETURNS " + udtName + " " + "LANGUAGE java\n" + "AS $$return udt;$$;");
    // Equivalent functions that are never replaced after the ALTER TYPE.
    String untouchedTxt = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + " ) " + "RETURNS NULL ON NULL INPUT " + "RETURNS text " + "LANGUAGE java\n" + "AS $$return udt.getString(\"txt\");$$;");
    String untouchedInt = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + " ) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS $$return Integer.valueOf(udt.getInt(\"i\"));$$;");
    String untouchedAdded = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + " ) " + "CALLED ON NULL INPUT " + "RETURNS double " + "LANGUAGE java\n" + "AS $$return Double.valueOf(udt.getDouble(\"added\"));$$;");
    String untouchedUdt = createFunction(KEYSPACE, udtName, "CREATE FUNCTION %s( udt " + udtName + " ) " + "RETURNS NULL ON NULL INPUT " + "RETURNS " + udtName + " " + "LANGUAGE java\n" + "AS $$return udt;$$;");
    // Sanity check before the type is altered.
    execute("INSERT INTO %s (key, udt) VALUES (1, {txt: 'one', i:1})");
    assertRows(execute("SELECT " + replacedTxt + "(udt) FROM %s WHERE key = 1"), row("one"));
    assertRows(execute("SELECT " + replacedInt + "(udt) FROM %s WHERE key = 1"), row(1));
    // Widen the type, insert a row that uses the new field, then replace the first set.
    execute("ALTER TYPE " + udtName + " ADD added double");
    execute("INSERT INTO %s (key, udt) VALUES (2, {txt: 'two', i:2, added: 2})");
    execute(String.format("CREATE OR REPLACE FUNCTION %s( udt %s ) " + "RETURNS NULL ON NULL INPUT " + "RETURNS text " + "LANGUAGE java\n" + "AS $$return " + "     udt.getString(\"txt\");$$;", replacedTxt, udtName));
    Assert.assertEquals(1, Schema.instance.getUserFunctions(parseFunctionName(replacedTxt)).size());
    execute(String.format("CREATE OR REPLACE FUNCTION %s( udt %s ) " + "CALLED ON NULL INPUT " + "RETURNS int " + "LANGUAGE java\n" + "AS $$return " + "     Integer.valueOf(udt.getInt(\"i\"));$$;", replacedInt, udtName));
    Assert.assertEquals(1, Schema.instance.getUserFunctions(parseFunctionName(replacedInt)).size());
    execute(String.format("CREATE OR REPLACE FUNCTION %s( udt %s ) " + "CALLED ON NULL INPUT " + "RETURNS double " + "LANGUAGE java\n" + "AS $$return " + "     Double.valueOf(udt.getDouble(\"added\"));$$;", replacedAdded, udtName));
    Assert.assertEquals(1, Schema.instance.getUserFunctions(parseFunctionName(replacedAdded)).size());
    execute(String.format("CREATE OR REPLACE FUNCTION %s( udt %s ) " + "RETURNS NULL ON NULL INPUT " + "RETURNS %s " + "LANGUAGE java\n" + "AS $$return " + "     udt;$$;", replacedUdt, udtName, udtName));
    Assert.assertEquals(1, Schema.instance.getUserFunctions(parseFunctionName(replacedUdt)).size());
    // Replaced and untouched functions must both see the post-ALTER data identically;
    // for rows written before the ALTER the new field reads as its default (0).
    assertRows(execute("SELECT " + replacedTxt + "(udt) FROM %s WHERE key = 2"), row("two"));
    assertRows(execute("SELECT " + replacedInt + "(udt) FROM %s WHERE key = 2"), row(2));
    assertRows(execute("SELECT " + replacedAdded + "(udt) FROM %s WHERE key = 2"), row(2d));
    assertRows(execute("SELECT " + replacedAdded + "(udt) FROM %s WHERE key = 1"), row(0d));
    assertRows(execute("SELECT " + untouchedTxt + "(udt) FROM %s WHERE key = 2"), row("two"));
    assertRows(execute("SELECT " + untouchedInt + "(udt) FROM %s WHERE key = 2"), row(2));
    assertRows(execute("SELECT " + untouchedAdded + "(udt) FROM %s WHERE key = 2"), row(2d));
    assertRows(execute("SELECT " + untouchedAdded + "(udt) FROM %s WHERE key = 1"), row(0d));
    // Clean up every function created above, in creation order.
    for (String fn : new String[] { replacedTxt, replacedInt, replacedAdded, replacedUdt, untouchedTxt, untouchedInt, untouchedAdded, untouchedUdt })
        execute("DROP FUNCTION " + fn);
}
301705.971139cassandra
public void testInvalidByteCodeUDFs() throws Throwable {
    assertInvalidByteCode("try\n" + "{\n" + "    clone();\n" + "}\n" + "catch (CloneNotSupportedException e)\n" + "{\n" + "    throw new RuntimeException(e);\n" + "}\n" + "return 0d;", "Java UDF validation failed: [call to java.lang.Object.clone()]");
    assertInvalidByteCode("try\n" + "{\n" + "    finalize();\n" + "}\n" + "catch (Throwable e)\n" + "{\n" + "    throw new RuntimeException(e);\n" + "}\n" + "return 0d;", "Java UDF validation failed: [call to java.lang.Object.finalize()]");
    assertInvalidByteCode('\n' + "return 0d;\n" + "    }\n" + '\n' + "    Object field;\n" + '\n' + "    {", "Java UDF validation failed: [field declared: field]");
    assertInvalidByteCode('\n' + "return 0d;\n" + "    }\n" + '\n' + "    final Object field;\n" + '\n' + "    {\n" + "field = new Object();", "Java UDF validation failed: [field declared: field, initializer declared]");
    assertInvalidByteCode('\n' + "return 0d;\n" + "    }\n" + '\n' + "    Object field = new Object();\n" + '\n' + "    {\n" + "Math.sin(1d);", "Java UDF validation failed: [field declared: field, initializer declared]");
    assertInvalidByteCode('\n' + "return 0d;\n" + "    }\n" + '\n' + "    {\n" + "Math.sin(1d);", "Java UDF validation failed: [initializer declared]");
    assertInvalidByteCode('\n' + "return 0d;\n" + "    }\n" + '\n' + "    static\n" + "    {\n" + "Math.sin(1d);", "Java UDF validation failed: [static initializer declared]");
    assertInvalidByteCode("synchronized (this)\n" + "{\n" + "    Math.sin(1d);\n" + "}\n" + "return 0d;", "Java UDF validation failed: [use of synchronized]");
    assertInvalidByteCode("synchronized (this)\n" + "{\n" + "    notify();\n" + "}\n" + "return 0d;", "Java UDF validation failed: [call to java.lang.Object.notify(), use of synchronized]");
    assertInvalidByteCode("synchronized (this)\n" + "{\n" + "    notifyAll();\n" + "}\n" + "return 0d;", "Java UDF validation failed: [call to java.lang.Object.notifyAll(), use of synchronized]");
    assertInvalidByteCode("synchronized (this)\n" + "{\n" + "    try\n" + "    {\n" + "        wait();\n" + "    }\n" + "    catch (InterruptedException e)\n" + "    {\n" + "        throw new RuntimeException(e);\n" + "    }\n" + "}\n" + "return 0d;", "Java UDF validation failed: [call to java.lang.Object.wait(), use of synchronized]");
    assertInvalidByteCode("synchronized (this)\n" + "{\n" + "    try\n" + "    {\n" + "        wait(1000L);\n" + "    }\n" + "    catch (InterruptedException e)\n" + "    {\n" + "        throw new RuntimeException(e);\n" + "    }\n" + "}\n" + "return 0d;", "Java UDF validation failed: [call to java.lang.Object.wait(), use of synchronized]");
    assertInvalidByteCode("synchronized (this)\n" + "{\n" + "    try\n" + "    {\n" + "        wait(1000L, 100);\n" + "    }\n" + "    catch (InterruptedException e)\n" + "    {\n" + "        throw new RuntimeException(e);\n" + "    }\n" + "}\n" + "return 0d;", "Java UDF validation failed: [call to java.lang.Object.wait(), use of synchronized]");
    assertInvalidByteCode("try {" + "     java.nio.ByteBuffer.allocateDirect(123); return 0d;" + "} catch (Exception t) {" + "     throw new RuntimeException(t);" + '}', "Java UDF validation failed: [call to java.nio.ByteBuffer.allocateDirect()]");
    assertInvalidByteCode("try {" + "     java.net.InetAddress.getLocalHost(); return 0d;" + "} catch (Exception t) {" + "     throw new RuntimeException(t);" + '}', "Java UDF validation failed: [call to java.net.InetAddress.getLocalHost()]");
    assertInvalidByteCode("try {" + "     java.net.InetAddress.getAllByName(\"localhost\"); return 0d;" + "} catch (Exception t) {" + "     throw new RuntimeException(t);" + '}', "Java UDF validation failed: [call to java.net.InetAddress.getAllByName()]");
    assertInvalidByteCode("try {" + "     java.net.Inet4Address.getByName(\"127.0.0.1\"); return 0d;" + "} catch (Exception t) {" + "     throw new RuntimeException(t);" + '}', "Java UDF validation failed: [call to java.net.Inet4Address.getByName()]");
    assertInvalidByteCode("try {" + "     java.net.Inet6Address.getByAddress(new byte[]{127,0,0,1}); return 0d;" + "} catch (Exception t) {" + "     throw new RuntimeException(t);" + '}', "Java UDF validation failed: [call to java.net.Inet6Address.getByAddress()]");
    assertInvalidByteCode("try {" + "     java.net.NetworkInterface.getNetworkInterfaces(); return 0d;" + "} catch (Exception t) {" + "     throw new RuntimeException(t);" + '}', "Java UDF validation failed: [call to java.net.NetworkInterface.getNetworkInterfaces()]");
}
302980.261119cassandra
/**
 * Verifies filtering on frozen collection columns (list, set, map) without any index:
 * every filtered query must be rejected unless ALLOW FILTERING is given, equality and
 * range comparisons operate on whole frozen values, CONTAINS / CONTAINS KEY work as
 * element predicates, and null/unset bind values are rejected with clear messages.
 */
public void testFilteringWithoutIndicesWithFrozenCollections() throws Throwable {
    createTable("CREATE TABLE %s (a int, b int, c frozen<list<int>>, d frozen<set<int>>, e frozen<map<int, int>>, PRIMARY KEY (a, b))");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 2, [1, 6], {2, 12}, {1: 6})");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 3, [3, 2], {6, 4}, {3: 2})");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (1, 4, [1, 2], {2, 4}, {1: 2})");
    execute("INSERT INTO %s (a, b, c, d, e) VALUES (2, 3, [3, 6], {6, 12}, {3: 6})");
    // All positive cases are checked both from memtables and after a flush (sstables).
    beforeAndAfterFlush(() -> {
        // Frozen list column c: whole-value equality, ranges, and CONTAINS.
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE c = [3, 2]");
        assertRows(execute("SELECT * FROM %s WHERE c = [3, 2] ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE c > [1, 5] AND c < [3, 6]");
        assertRows(execute("SELECT * FROM %s WHERE c > [1, 5] AND c < [3, 6] ALLOW FILTERING"), row(1, 2, list(1, 6), set(2, 12), map(1, 6)), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertRows(execute("SELECT * FROM %s WHERE c >= [1, 6] AND c < [3, 3] ALLOW FILTERING"), row(1, 2, list(1, 6), set(2, 12), map(1, 6)), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE c CONTAINS 2");
        assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)), row(1, 4, list(1, 2), set(2, 4), map(1, 2)));
        assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND c CONTAINS 3 ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        // Frozen set column d: same shapes as above.
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE d = {6, 4}");
        assertRows(execute("SELECT * FROM %s WHERE d = {6, 4} ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE d > {4, 5} AND d < {6}");
        assertRows(execute("SELECT * FROM %s WHERE d > {4, 5} AND d < {6} ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertRows(execute("SELECT * FROM %s WHERE d >= {2, 12} AND d <= {4, 6} ALLOW FILTERING"), row(1, 2, list(1, 6), set(2, 12), map(1, 6)), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE d CONTAINS 4");
        assertRows(execute("SELECT * FROM %s WHERE d CONTAINS 4 ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)), row(1, 4, list(1, 2), set(2, 4), map(1, 2)));
        assertRows(execute("SELECT * FROM %s WHERE d CONTAINS 4 AND d CONTAINS 6 ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        // Frozen map column e: equality, ranges, CONTAINS (values), CONTAINS KEY,
        // and the unsupported map-entry (e[k] = v) predicate.
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE e = {1 : 2}");
        assertRows(execute("SELECT * FROM %s WHERE e = {1 : 2} ALLOW FILTERING"), row(1, 4, list(1, 2), set(2, 4), map(1, 2)));
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE e > {1 : 4} AND e < {3 : 6}");
        assertRows(execute("SELECT * FROM %s WHERE e > {1 : 4} AND e < {3 : 6} ALLOW FILTERING"), row(1, 2, list(1, 6), set(2, 12), map(1, 6)), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertRows(execute("SELECT * FROM %s WHERE e >= {1 : 6} AND e <= {3 : 2} ALLOW FILTERING"), row(1, 2, list(1, 6), set(2, 12), map(1, 6)), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
        assertInvalidMessage(StatementRestrictions.REQUIRES_ALLOW_FILTERING_MESSAGE, "SELECT * FROM %s WHERE e CONTAINS 2");
        assertRows(execute("SELECT * FROM %s WHERE e CONTAINS 2 ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)), row(1, 4, list(1, 2), set(2, 4), map(1, 2)));
        assertRows(execute("SELECT * FROM %s WHERE e CONTAINS KEY 1 ALLOW FILTERING"), row(1, 2, list(1, 6), set(2, 12), map(1, 6)), row(1, 4, list(1, 2), set(2, 4), map(1, 2)));
        assertInvalidMessage("Map-entry predicates on frozen map column e are not supported", "SELECT * FROM %s WHERE e[1] = 6 ALLOW FILTERING");
        assertRows(execute("SELECT * FROM %s WHERE e CONTAINS KEY 1 AND e CONTAINS 2 ALLOW FILTERING"), row(1, 4, list(1, 2), set(2, 4), map(1, 2)));
        assertRows(execute("SELECT * FROM %s WHERE c CONTAINS 2 AND d CONTAINS 4 AND e CONTAINS KEY 3 ALLOW FILTERING"), row(1, 3, list(3, 2), set(6, 4), map(3, 2)));
    });
    // Null literals are rejected for every predicate shape.
    assertInvalidMessage("Invalid null value for column c", "SELECT * FROM %s WHERE c = null ALLOW FILTERING");
    assertInvalidMessage("Invalid null value for column c", "SELECT * FROM %s WHERE c CONTAINS null ALLOW FILTERING");
    assertInvalidMessage("Invalid null value for column d", "SELECT * FROM %s WHERE d = null ALLOW FILTERING");
    assertInvalidMessage("Invalid null value for column d", "SELECT * FROM %s WHERE d CONTAINS null ALLOW FILTERING");
    assertInvalidMessage("Invalid null value for column e", "SELECT * FROM %s WHERE e = null ALLOW FILTERING");
    assertInvalidMessage("Invalid null value for column e", "SELECT * FROM %s WHERE e CONTAINS null ALLOW FILTERING");
    assertInvalidMessage("Invalid null value for column e", "SELECT * FROM %s WHERE e CONTAINS KEY null ALLOW FILTERING");
    assertInvalidMessage("Map-entry predicates on frozen map column e are not supported", "SELECT * FROM %s WHERE e[null] = 2 ALLOW FILTERING");
    assertInvalidMessage("Map-entry predicates on frozen map column e are not supported", "SELECT * FROM %s WHERE e[1] = null ALLOW FILTERING");
    // Unset bind markers are likewise rejected.
    assertInvalidMessage("Invalid unset value for column c", "SELECT * FROM %s WHERE c = ? ALLOW FILTERING", unset());
    assertInvalidMessage("Invalid unset value for column c", "SELECT * FROM %s WHERE c CONTAINS ? ALLOW FILTERING", unset());
    assertInvalidMessage("Invalid unset value for column d", "SELECT * FROM %s WHERE d = ? ALLOW FILTERING", unset());
    assertInvalidMessage("Invalid unset value for column d", "SELECT * FROM %s WHERE d CONTAINS ? ALLOW FILTERING", unset());
    assertInvalidMessage("Invalid unset value for column e", "SELECT * FROM %s WHERE e = ? ALLOW FILTERING", unset());
    assertInvalidMessage("Invalid unset value for column e", "SELECT * FROM %s WHERE e CONTAINS ? ALLOW FILTERING", unset());
    assertInvalidMessage("Invalid unset value for column e", "SELECT * FROM %s WHERE e CONTAINS KEY ? ALLOW FILTERING", unset());
    assertInvalidMessage("Map-entry predicates on frozen map column e are not supported", "SELECT * FROM %s WHERE e[?] = 2 ALLOW FILTERING", unset());
    assertInvalidMessage("Map-entry predicates on frozen map column e are not supported", "SELECT * FROM %s WHERE e[1] = ? ALLOW FILTERING", unset());
}
302502.61996cassandra
/**
 * Exercises materialized-view creation over a table with a compound partition key
 * ((k, asciival)), for every base column: MVs that promote the column into the
 * clustering or partition key must succeed for non-multicell columns and fail for
 * multicell ones, duplicate view names and unknown columns must be rejected, and
 * subsequent writes/updates/truncates must stay consistent between base and views.
 */
public void testCompoundPartitionKey() throws Throwable {
    createTable("CREATE TABLE %s (" + "k int, " + "asciival ascii, " + "bigintval bigint, " + "PRIMARY KEY((k, asciival)))");
    TableMetadata metadata = currentTableMetadata();
    // Copy into a HashSet so the iteration is over a stable snapshot of the columns.
    for (ColumnMetadata def : new HashSet<>(metadata.columns())) {
        // When the column under test IS asciival, it is already restricted below;
        // otherwise an extra IS NOT NULL clause is needed for it.
        String asciival = def.name.toString().equals("asciival") ? "" : "AND asciival IS NOT NULL ";
        // mv1: column under test becomes the view's partition key.
        try {
            String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL " + asciival + "PRIMARY KEY (" + def.name + ", k" + (def.name.toString().equals("asciival") ? "" : ", asciival") + ")";
            createView("mv1_" + def.name, query);
            if (def.type.isMultiCell())
                Assert.fail("MV on a multicell should fail " + def);
        } catch (Exception e) {
            if (!def.type.isMultiCell() && !def.isPartitionKey())
                Assert.fail("MV creation failed on " + def);
        }
        // mv2: same, but with asciival placed before k in the clustering order.
        try {
            String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL " + asciival + " PRIMARY KEY (" + def.name + ", asciival" + (def.name.toString().equals("k") ? "" : ", k") + ")";
            createView("mv2_" + def.name, query);
            if (def.type.isMultiCell())
                Assert.fail("MV on a multicell should fail " + def);
        } catch (Exception e) {
            if (!def.type.isMultiCell() && !def.isPartitionKey())
                Assert.fail("MV creation failed on " + def);
        }
        // mv3: compound partition key (column, k) with asciival clustering.
        try {
            String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL " + asciival + "PRIMARY KEY ((" + def.name + ", k), asciival)";
            createView("mv3_" + def.name, query);
            if (def.type.isMultiCell())
                Assert.fail("MV on a multicell should fail " + def);
        } catch (Exception e) {
            if (!def.type.isMultiCell() && !def.isPartitionKey())
                Assert.fail("MV creation failed on " + def);
        }
        // Re-creating mv3 under the same name must be rejected as a duplicate.
        try {
            String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL " + asciival + "PRIMARY KEY ((" + def.name + ", k), asciival)";
            createView("mv3_" + def.name, query);
            Assert.fail("Should fail on duplicate name");
        } catch (Exception e) {
            Assertions.assertThat(e.getCause()).isInstanceOf(RequestValidationException.class);
        }
        // mv4: references a column that does not exist in the base table.
        try {
            String query = "CREATE MATERIALIZED VIEW %s AS SELECT * FROM %s WHERE " + def.name + " IS NOT NULL AND k IS NOT NULL " + asciival + "PRIMARY KEY ((" + def.name + ", k), nonexistentcolumn)";
            createView("mv4_" + def.name, query);
            Assert.fail("Should fail with unknown base column");
        } catch (Exception e) {
            Assertions.assertThat(e.getCause()).isInstanceOf(RequestValidationException.class);
        }
    }
    // Write via from_json and verify the base table and each surviving view agree.
    updateView("INSERT INTO %s (k, asciival, bigintval) VALUES (?, ?, from_json(?))", 0, "ascii text", "123123123123");
    updateView("INSERT INTO %s (k, asciival) VALUES (?, from_json(?))", 0, "\"ascii text\"");
    assertRows(execute("SELECT bigintval FROM %s WHERE k = ? and asciival = ?", 0, "ascii text"), row(123123123123L));
    assertRows(execute("SELECT k, bigintval from mv1_asciival WHERE asciival = ?", "ascii text"), row(0, 123123123123L));
    assertRows(execute("SELECT k, bigintval from mv2_k WHERE asciival = ? and k = ?", "ascii text", 0), row(0, 123123123123L));
    assertRows(execute("SELECT k from mv1_bigintval WHERE bigintval = ?", 123123123123L), row(0));
    assertRows(execute("SELECT asciival from mv3_bigintval where bigintval = ? AND k = ?", 123123123123L, 0), row("ascii text"));
    // Overwrite bigintval; views keyed on the old value must no longer return the row.
    updateView("INSERT INTO %s (k, asciival, bigintval) VALUES (?, ?, from_json(?))", 0, "ascii text", "1");
    assertRows(execute("SELECT bigintval FROM %s WHERE k = ? and asciival = ?", 0, "ascii text"), row(1L));
    assertRows(execute("SELECT k, bigintval from mv1_asciival WHERE asciival = ?", "ascii text"), row(0, 1L));
    assertRows(execute("SELECT k, bigintval from mv2_k WHERE asciival = ? and k = ?", "ascii text", 0), row(0, 1L));
    assertRows(execute("SELECT k from mv1_bigintval WHERE bigintval = ?", 123123123123L));
    assertRows(execute("SELECT asciival from mv3_bigintval where bigintval = ? AND k = ?", 123123123123L, 0));
    assertRows(execute("SELECT asciival from mv3_bigintval where bigintval = ? AND k = ?", 1L, 0), row("ascii text"));
    // Truncating the base table must empty the views as well.
    updateView("TRUNCATE %s");
    assertRows(execute("SELECT bigintval FROM %s WHERE k = ? and asciival = ?", 0, "ascii text"));
    assertRows(execute("SELECT k, bigintval from mv1_asciival WHERE asciival = ?", "ascii text"));
    assertRows(execute("SELECT k, bigintval from mv2_k WHERE asciival = ? and k = ?", "ascii text", 0));
    assertRows(execute("SELECT asciival from mv3_bigintval where bigintval = ? AND k = ?", 1L, 0));
}
303560.613104cassandra
/**
 * Exercises {@code SimpleDateType.compare} across string-parsed dates,
 * unsigned-encoded day offsets, epoch boundaries and the full unsigned 32-bit
 * day range, asserting the expected ordering in each case.
 *
 * Fix: the very first assertion previously used a bare "Failed == comparison"
 * message, and the detailed {@code String.format(...)} message was built on the
 * following statement with its result discarded (dead code). The formatted
 * message is now attached to the assertion as clearly intended.
 */
public void TestComparison() {
    // 1970-01-05 is 4 days after the epoch; makeUnsigned(4) encodes the same day.
    ByteBuffer d1 = SimpleDateType.instance.fromString("1970-01-05");
    ByteBuffer d2 = SimpleDateSerializer.instance.serialize(makeUnsigned(4));
    assert SimpleDateType.instance.compare(d1, d2) == 0 : String.format("Failed == comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    d1 = SimpleDateType.instance.fromString("1970-01-05");
    d2 = SimpleDateSerializer.instance.serialize(makeUnsigned(10));
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed comparison of %s and %s, expected <", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    d1 = SimpleDateType.instance.fromString("1970-01-05");
    d2 = SimpleDateSerializer.instance.serialize(makeUnsigned(-10));
    assert SimpleDateType.instance.compare(d1, d2) > 0 : String.format("Failed comparison of %s and %s, expected > 0", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    // Numeric string inputs are parsed as raw day values.
    d1 = SimpleDateType.instance.fromString("1");
    d2 = SimpleDateType.instance.fromString("1000");
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed < comparison with string inputs %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    Integer intLimit = Integer.MAX_VALUE;
    d1 = SimpleDateType.instance.fromString("0");
    d2 = SimpleDateType.instance.fromString(intLimit.toString());
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed < comparison with string inputs at integer bounds %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    // NOTE(review): named 'overLimit' but holds exactly Integer.MAX_VALUE, not a
    // value beyond it — presumably Integer.MAX_VALUE + 1L was intended; confirm.
    Long overLimit = (long) (Integer.MAX_VALUE);
    d1 = SimpleDateType.instance.fromString("0");
    d2 = SimpleDateType.instance.fromString(overLimit.toString());
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed < comparison with string inputs at integer bounds %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    // Full unsigned 32-bit range: 0 vs 2^32 - 1.
    Long i1 = 0L;
    Long i2 = (long) Math.pow(2, 32) - 1;
    d1 = SimpleDateType.instance.fromString(i1.toString());
    d2 = SimpleDateType.instance.fromString(i2.toString());
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed limits comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    // Values crossing a byte boundary (256 vs 512) must still order correctly.
    d1 = SimpleDateType.instance.fromString("256");
    d2 = SimpleDateType.instance.fromString("512");
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    d1 = SimpleDateSerializer.instance.serialize(makeUnsigned(0));
    d2 = SimpleDateSerializer.instance.serialize(makeUnsigned(Integer.MAX_VALUE));
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed neg/pos comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    // Dates far before vs far after the epoch, then the epoch boundary itself.
    d1 = SimpleDateType.instance.fromString("-2200-10-10");
    d2 = SimpleDateType.instance.fromString("2200-10-10");
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed neg/pos string comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    d1 = SimpleDateType.instance.fromString("1969-12-31");
    d2 = SimpleDateType.instance.fromString("1970-01-01");
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed pre/post epoch comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    d1 = SimpleDateType.instance.fromString("1970-01-01");
    d2 = SimpleDateType.instance.fromString("1970-01-01");
    assert SimpleDateType.instance.compare(d1, d2) == 0 : String.format("Failed == date from string comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    d1 = SimpleDateType.instance.fromString("1970-01-01");
    d2 = SimpleDateType.instance.fromString("1970-01-02");
    assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed post epoch string comparison with %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    // Sweep powers of two: a day -offset must sort before +offset for every bit position...
    for (int i = 0; i < 32; ++i) {
        int offset = (int) Math.pow(2, i);
        d1 = SimpleDateSerializer.instance.serialize(makeUnsigned(0 - offset));
        d2 = SimpleDateSerializer.instance.serialize(makeUnsigned(offset));
        assert SimpleDateType.instance.compare(d1, d2) < 0 : String.format("Failed < comparison of %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    }
    // ...and symmetrically, +offset must sort after -offset.
    for (int i = 0; i < 32; ++i) {
        int offset = (int) Math.pow(2, i);
        d1 = SimpleDateSerializer.instance.serialize(makeUnsigned(offset));
        d2 = SimpleDateSerializer.instance.serialize(makeUnsigned(0 - offset));
        assert SimpleDateType.instance.compare(d1, d2) > 0 : String.format("Failed > comparison of %s and %s", SimpleDateSerializer.instance.deserialize(d1), SimpleDateSerializer.instance.deserialize(d2));
    }
}
303121.461399cassandra
/**
 * Randomized simulation test: runs repairs against an in-memory cluster while injecting a
 * failure into one participant at a randomly chosen stage (VALIDATION or SYNC), then asserts
 * that the repair completes with a FAILURE result whose message names the failing node.
 */
public void failingRepair() {
    // Retry "forever" so transient message faults from enableMessageFaults are always retried
    // and the only terminal failure observed is the one injected at the chosen stage.
    DatabaseDescriptor.getRepairRetrySpec().maxAttempts = new RetrySpec.MaxAttempt(Integer.MAX_VALUE);
    Gen<RepairJobStage> stageGen = Gens.enums().all(RepairJobStage.class);
    qt().withPure(false).withExamples(10).check(rs -> {
        Cluster cluster = new Cluster(rs);
        enableMessageFaults(cluster);
        Gen<Cluster.Node> coordinatorGen = Gens.pick(cluster.nodes.keySet()).map(cluster.nodes::get);
        // Fault-injection hooks registered per example; closed and cleared at the end of each loop.
        List<Closeable> closeables = new ArrayList<>();
        for (int example = 0; example < 100; example++) {
            Cluster.Node coordinator = coordinatorGen.next(rs);
            RepairCoordinator repair = coordinator.repair(KEYSPACE, repairOption(rs, coordinator, KEYSPACE, TABLES), false);
            repair.run();
            // The participant whose stage handler will be made to fail this iteration.
            InetAddressAndPort failingAddress = pickParticipant(rs, coordinator, repair);
            Cluster.Node failingNode = cluster.nodes.get(failingAddress);
            RepairJobStage stage = stageGen.next(rs);
            // Addresses whose simulated sync was failed; consulted when checking the error message.
            Set<InetAddressAndPort> syncFailedAddresses = new HashSet<>();
            switch(stage) {
                case VALIDATION:
                    {
                        // Fail validation on the chosen node after a random delay (5ms..1min).
                        closeables.add(failingNode.doValidation((cfs, validator) -> {
                            long delayNanos = rs.nextLong(TimeUnit.MILLISECONDS.toNanos(5), TimeUnit.MINUTES.toNanos(1));
                            cluster.unorderedScheduled.schedule(() -> validator.fail(new SimulatedFault("Validation failed")), delayNanos, TimeUnit.NANOSECONDS);
                        }));
                    }
                    break;
                case SYNC:
                    {
                        // Force a digest mismatch so the repair actually reaches the sync stage.
                        closeables.add(failingNode.doValidation((cfs, validator) -> addMismatch(rs, cfs, validator)));
                        List<InetAddressAndPort> addresses = ImmutableList.<InetAddressAndPort>builder().add(coordinator.addressAndPort).addAll(repair.state.getNeighborsAndRanges().participants).build();
                        for (InetAddressAndPort address : addresses) {
                            closeables.add(cluster.nodes.get(address).doSync(plan -> {
                                long delayNanos = rs.nextLong(TimeUnit.SECONDS.toNanos(5), TimeUnit.MINUTES.toNanos(10));
                                cluster.unorderedScheduled.schedule(() -> {
                                    // Any stream plan touching the failing node fails; all others succeed.
                                    // NOTE(review): '==' is an identity compare — assumes addresses are
                                    // canonical shared instances in this simulation; verify.
                                    if (address == failingAddress || plan.getCoordinator().getPeers().contains(failingAddress)) {
                                        syncFailedAddresses.add(address);
                                        SimulatedFault fault = new SimulatedFault("Sync failed");
                                        for (StreamEventHandler handler : plan.handlers()) handler.onFailure(fault);
                                    } else {
                                        StreamState success = new StreamState(plan.planId(), plan.streamOperation(), Collections.emptySet());
                                        for (StreamEventHandler handler : plan.handlers()) handler.onSuccess(success);
                                    }
                                }, delayNanos, TimeUnit.NANOSECONDS);
                                return null;
                            }));
                        }
                    }
                    break;
                default:
                    throw new IllegalArgumentException("Unknown stage: " + stage);
            }
            // Drain all simulated scheduled work, then verify the repair terminated as a FAILURE.
            cluster.processAll();
            Assertions.assertThat(repair.state.isComplete()).describedAs("Repair job did not complete, and no work is pending...").isTrue();
            Assertions.assertThat(repair.state.getResult().kind).describedAs("Unexpected state: %s -> %s; example %d", repair.state, repair.state.getResult(), example).isEqualTo(Completable.Result.Kind.FAILURE);
            switch(stage) {
                case VALIDATION:
                    {
                        // Either the validation callback fired, or the VALIDATION_REQ itself failed.
                        Assertions.assertThat(repair.state.getResult().message).describedAs("Unexpected state: %s -> %s; example %d", repair.state, repair.state.getResult(), example).containsAnyOf("Validation failed in " + failingAddress, "Got VALIDATION_REQ failure from " + failingAddress + ": UNKNOWN");
                    }
                    break;
                case SYNC:
                    AbstractStringAssert<?> a = Assertions.assertThat(repair.state.getResult().message).describedAs("Unexpected state: %s -> %s; example %d", repair.state, repair.state.getResult(), example);
                    String failingMsg = repair.state.getResult().message;
                    // Three possible surfaces for the same injected sync fault, depending on
                    // which layer reported it first.
                    if (failingMsg.contains("Sync failed between")) {
                        a.contains("Sync failed between").contains(failingAddress.toString());
                    } else if (failingMsg.contains("Got SYNC_REQ failure from")) {
                        Assertions.assertThat(syncFailedAddresses).isNotEmpty();
                        a.containsAnyOf(syncFailedAddresses.stream().map(s -> "Got SYNC_REQ failure from " + s + ": UNKNOWN").collect(Collectors.toList()).toArray(String[]::new));
                    } else {
                        a.contains("failed with error Sync failed");
                    }
                    break;
                default:
                    throw new IllegalArgumentException("Unknown stage: " + stage);
            }
            assertParticipateResult(cluster, repair, Completable.Result.Kind.FAILURE);
            // Unregister this example's fault hooks before the next iteration.
            closeables.forEach(Closeable::close);
            closeables.clear();
        }
    });
}
303143.081116cassandra
/**
 * Verifies that a map type nested inside a multi-component byte-comparable sequence can be
 * decoded back out regardless of its position (middle, first, last), for all four
 * varint/string key-value combinations.
 */
public void testReadingNestedSequence_Simple() {
    String padding1 = "A string";
    String padding2 = "Another string";
    // Four distinct varints spanning small, large, and maximal magnitudes.
    BigInteger varint1 = BigInteger.valueOf(0b10000000);
    // FIX: was `1 >> 30`, which evaluates to 0 — clearly a typo for a left shift,
    // matching the neighbouring large constants (2^7, 2^30, 2^28, Long.MAX_VALUE).
    BigInteger varint2 = BigInteger.valueOf(1 << 30);
    BigInteger varint3 = BigInteger.valueOf(0x10000000L);
    BigInteger varint4 = BigInteger.valueOf(Long.MAX_VALUE);
    String string1 = "Testing byte sources";
    String string2 = "is neither easy nor fun;";
    String string3 = "But do it we must.";
    String string4 = "— DataStax, 2018";
    MapType<BigInteger, String> varintStringMapType = MapType.getInstance(VARINT, UTF8, false);
    Map<BigInteger, String> varintStringMap = new TreeMap<>();
    varintStringMap.put(varint1, string1);
    varintStringMap.put(varint2, string2);
    varintStringMap.put(varint3, string3);
    varintStringMap.put(varint4, string4);
    // Map in the middle of the sequence.
    ByteSource sequence = ByteSource.withTerminator(ByteSource.TERMINATOR, ByteSource.of(padding1, version), varintStringMapType.asComparableBytes(varintStringMapType.decompose(varintStringMap), version), ByteSource.of(padding2, version));
    ByteSource.Peekable comparableBytes = ByteSource.peekable(sequence);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(varintStringMapType, comparableBytes), varintStringMap);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
    // Map as the first component.
    sequence = ByteSource.withTerminator(ByteSource.TERMINATOR, varintStringMapType.asComparableBytes(varintStringMapType.decompose(varintStringMap), version), ByteSource.of(padding1, version), ByteSource.of(padding2, version));
    comparableBytes = ByteSource.peekable(sequence);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(varintStringMapType, comparableBytes), varintStringMap);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
    // Map as the last component.
    sequence = ByteSource.withTerminator(ByteSource.TERMINATOR, ByteSource.of(padding1, version), ByteSource.of(padding2, version), varintStringMapType.asComparableBytes(varintStringMapType.decompose(varintStringMap), version));
    comparableBytes = ByteSource.peekable(sequence);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(varintStringMapType, comparableBytes), varintStringMap);
    // String-keyed / varint-valued map.
    MapType<String, BigInteger> stringVarintMapType = MapType.getInstance(UTF8, VARINT, false);
    Map<String, BigInteger> stringVarintMap = new HashMap<>();
    stringVarintMap.put(string1, varint1);
    stringVarintMap.put(string2, varint2);
    stringVarintMap.put(string3, varint3);
    stringVarintMap.put(string4, varint4);
    sequence = ByteSource.withTerminator(ByteSource.TERMINATOR, ByteSource.of(padding1, version), stringVarintMapType.asComparableBytes(stringVarintMapType.decompose(stringVarintMap), version), ByteSource.of(padding2, version));
    comparableBytes = ByteSource.peekable(sequence);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(stringVarintMapType, comparableBytes), stringVarintMap);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
    // String-keyed / string-valued map.
    MapType<String, String> stringStringMapType = MapType.getInstance(UTF8, UTF8, false);
    Map<String, String> stringStringMap = new HashMap<>();
    stringStringMap.put(string1, string4);
    stringStringMap.put(string2, string3);
    stringStringMap.put(string3, string2);
    stringStringMap.put(string4, string1);
    sequence = ByteSource.withTerminator(ByteSource.TERMINATOR, ByteSource.of(padding1, version), stringStringMapType.asComparableBytes(stringStringMapType.decompose(stringStringMap), version), ByteSource.of(padding2, version));
    comparableBytes = ByteSource.peekable(sequence);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(stringStringMapType, comparableBytes), stringStringMap);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
    // Varint-keyed / varint-valued map.
    MapType<BigInteger, BigInteger> varintVarintMapType = MapType.getInstance(VARINT, VARINT, false);
    Map<BigInteger, BigInteger> varintVarintMap = new HashMap<>();
    varintVarintMap.put(varint1, varint4);
    varintVarintMap.put(varint2, varint3);
    varintVarintMap.put(varint3, varint2);
    varintVarintMap.put(varint4, varint1);
    sequence = ByteSource.withTerminator(ByteSource.TERMINATOR, ByteSource.of(padding1, version), varintVarintMapType.asComparableBytes(varintVarintMapType.decompose(varintVarintMap), version), ByteSource.of(padding2, version));
    comparableBytes = ByteSource.peekable(sequence);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding1);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(varintVarintMapType, comparableBytes), varintVarintMap);
    assertEquals(ByteSource.NEXT_COMPONENT, comparableBytes.next());
    assertEquals(getComponentValue(UTF8, comparableBytes), padding2);
}
301804.812299cassandra
/**
 * Parses a cassandra-stress text log into JSON graph data, appending one JSON object per
 * stress iteration (with its "metrics", "test", "revision", "command" and "intervals"
 * fields) to {@code stats}.
 *
 * @param log   the raw stress log stream; read with the platform default charset
 *              (NOTE(review): the log is produced by the same JVM, so this matches — but
 *              consider pinning UTF-8 explicitly).
 * @param stats accumulator node; mutated in place and also returned for convenience.
 * @return {@code stats} with any parsed iterations appended.
 * @throws RuntimeException wrapping any {@link IOException} raised while reading the log.
 */
private ArrayNode parseLogStats(InputStream log, ArrayNode stats) {
    BufferedReader reader = new BufferedReader(new InputStreamReader(log));
    ObjectNode json = JsonUtils.JSON_OBJECT_MAPPER.createObjectNode();
    ArrayNode intervals = JsonUtils.JSON_OBJECT_MAPPER.createArrayNode();
    boolean runningMultipleThreadCounts = false;
    String currentThreadCount = null;
    // NOTE(review): compiled on every call; hoist to a static final field if this runs hot.
    Pattern threadCountMessage = Pattern.compile("Running ([A-Z]+) with ([0-9]+) threads .*");
    ReadingMode mode = ReadingMode.START;
    try {
        String line;
        while ((line = reader.readLine()) != null) {
            // A multi-thread-count run announces each thread count; remember the latest so
            // it can be appended to the "revision" label below.
            if (line.startsWith("Thread count was not specified"))
                runningMultipleThreadCounts = true;
            if (runningMultipleThreadCounts) {
                Matcher tc = threadCountMessage.matcher(line);
                if (tc.matches()) {
                    currentThreadCount = tc.group(2);
                }
            }
            // State transitions: header row -> METRICS, "Results:" -> AGGREGATES,
            // blank line after aggregates -> NEXTITERATION, END/FAILURE -> stop.
            if (line.equals(StressMetrics.HEAD)) {
                mode = ReadingMode.METRICS;
                continue;
            } else if (line.equals("Results:")) {
                mode = ReadingMode.AGGREGATES;
                continue;
            } else if (mode == ReadingMode.AGGREGATES && line.isEmpty()) {
                mode = ReadingMode.NEXTITERATION;
            } else if (line.equals("END") || line.equals("FAILURE")) {
                break;
            }
            if (mode == ReadingMode.METRICS) {
                // One CSV row per interval; non-numeric cells become JSON nulls.
                ArrayNode metrics = JsonUtils.JSON_OBJECT_MAPPER.createArrayNode();
                String[] parts = line.split(",");
                if (parts.length != StressMetrics.HEADMETRICS.length) {
                    continue;
                }
                for (String m : parts) {
                    try {
                        metrics.add(new BigDecimal(m.trim()));
                    } catch (NumberFormatException e) {
                        metrics.addNull();
                    }
                }
                intervals.add(metrics);
            } else if (mode == ReadingMode.AGGREGATES) {
                // "key : value" summary lines become lowercase JSON fields.
                String[] parts = line.split(":", 2);
                if (parts.length != 2) {
                    continue;
                }
                json.put(parts[0].trim().toLowerCase(), parts[1].trim());
            } else if (mode == ReadingMode.NEXTITERATION) {
                // Finalize this iteration's object and reset for the next one.
                ArrayNode metrics = json.putArray("metrics");
                for (String metric : StressMetrics.HEADMETRICS) {
                    metrics.add(metric);
                }
                json.put("test", stressSettings.graph.operation);
                if (currentThreadCount == null)
                    json.put("revision", stressSettings.graph.revision);
                else
                    json.put("revision", String.format("%s - %s threads", stressSettings.graph.revision, currentThreadCount));
                // Redact passwords from the recorded command line.
                String command = StringUtils.join(stressArguments, " ").replaceAll("password=.*? ", "password=******* ");
                json.put("command", command);
                json.set("intervals", intervals);
                stats.add(json);
                json = JsonUtils.JSON_OBJECT_MAPPER.createObjectNode();
                intervals = JsonUtils.JSON_OBJECT_MAPPER.createArrayNode();
                mode = ReadingMode.START;
            }
        }
    } catch (IOException e) {
        // Preserve the cause so the underlying I/O failure is not lost.
        throw new RuntimeException("Couldn't read from temporary stress log file", e);
    }
    // Flush a trailing, unterminated iteration if the log ended without a blank line.
    if (json.size() != 0)
        stats.add(json);
    return stats;
}
301358.7622111elasticsearch
/**
 * Exercises {@code RestClient.builder} argument validation (null/empty hosts and nodes,
 * null headers and callbacks) and then builds a client from a randomized configuration.
 */
public void testBuild() throws IOException {
    try {
        RestClient.builder((HttpHost[]) null);
        fail("should have failed");
    } catch (IllegalArgumentException expected) {
        assertEquals("hosts must not be null nor empty", expected.getMessage());
    }
    try {
        RestClient.builder(new HttpHost[] {});
        fail("should have failed");
    } catch (IllegalArgumentException expected) {
        assertEquals("hosts must not be null nor empty", expected.getMessage());
    }
    try {
        RestClient.builder((Node[]) null);
        fail("should have failed");
    } catch (IllegalArgumentException expected) {
        assertEquals("nodes must not be null or empty", expected.getMessage());
    }
    try {
        RestClient.builder(new Node[] {});
        fail("should have failed");
    } catch (IllegalArgumentException expected) {
        assertEquals("nodes must not be null or empty", expected.getMessage());
    }
    try {
        RestClient.builder(new Node(new HttpHost("localhost", 9200)), null);
        fail("should have failed");
    } catch (IllegalArgumentException expected) {
        assertEquals("node cannot be null", expected.getMessage());
    }
    try {
        RestClient.builder(new HttpHost("localhost", 9200), null);
        fail("should have failed");
    } catch (IllegalArgumentException expected) {
        assertEquals("host cannot be null", expected.getMessage());
    }
    // A single valid host builds successfully.
    try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
        assertNotNull(restClient);
    }
    try {
        RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null);
        fail("should have failed");
    } catch (NullPointerException expected) {
        assertEquals("defaultHeaders must not be null", expected.getMessage());
    }
    try {
        RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(new Header[] { null });
        fail("should have failed");
    } catch (NullPointerException expected) {
        assertEquals("default header must not be null", expected.getMessage());
    }
    try {
        RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null);
        fail("should have failed");
    } catch (NullPointerException expected) {
        assertEquals("failureListener must not be null", expected.getMessage());
    }
    try {
        RestClient.builder(new HttpHost("localhost", 9200)).setHttpClientConfigCallback(null);
        fail("should have failed");
    } catch (NullPointerException expected) {
        assertEquals("httpClientConfigCallback must not be null", expected.getMessage());
    }
    try {
        RestClient.builder(new HttpHost("localhost", 9200)).setRequestConfigCallback(null);
        fail("should have failed");
    } catch (NullPointerException expected) {
        assertEquals("requestConfigCallback must not be null", expected.getMessage());
    }
    // Build a client from a random number of hosts and randomly enabled options.
    int nodeCount = randomIntBetween(1, 5);
    HttpHost[] httpHosts = new HttpHost[nodeCount];
    for (int n = 0; n < nodeCount; n++) {
        httpHosts[n] = new HttpHost("localhost", 9200 + n);
    }
    RestClientBuilder clientBuilder = RestClient.builder(httpHosts);
    if (randomBoolean()) {
        // Identity callback: leaves the http client configuration untouched.
        clientBuilder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder);
    }
    if (randomBoolean()) {
        // Identity callback: leaves the request configuration untouched.
        clientBuilder.setRequestConfigCallback(requestConfigBuilder -> requestConfigBuilder);
    }
    if (randomBoolean()) {
        int headerCount = randomIntBetween(1, 5);
        Header[] defaultHeaders = new Header[headerCount];
        for (int n = 0; n < headerCount; n++) {
            defaultHeaders[n] = new BasicHeader("header" + n, "value");
        }
        clientBuilder.setDefaultHeaders(defaultHeaders);
    }
    if (randomBoolean()) {
        // Random path prefix with optional leading and trailing slashes.
        StringBuilder prefix = new StringBuilder(randomBoolean() ? "/" : "");
        prefix.append(randomAsciiLettersOfLengthBetween(2, 5));
        while (prefix.length() < 20 && randomBoolean()) {
            prefix.append("/").append(randomAsciiLettersOfLengthBetween(3, 6));
        }
        clientBuilder.setPathPrefix(prefix + (randomBoolean() ? "/" : ""));
    }
    try (RestClient restClient = clientBuilder.build()) {
        assertNotNull(restClient);
    }
}
303302.761108elasticsearch
/**
 * Registers this plugin's pre-configured (no-settings) token filters, one
 * {@code PreConfiguredTokenFilter} per name.
 *
 * The boolean arguments are passed straight through to
 * {@code PreConfiguredTokenFilter.singleton}/{@code indexVersion}; their exact semantics
 * (e.g. multi-term-query applicability) are defined there and not visible in this method —
 * preserve them exactly when editing. Entry order determines list order.
 *
 * @return a mutable list of all pre-configured token filter registrations.
 */
public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
    List<PreConfiguredTokenFilter> filters = new ArrayList<>();
    filters.add(PreConfiguredTokenFilter.singleton("apostrophe", false, ApostropheFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("arabic_normalization", true, ArabicNormalizationFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("arabic_stem", false, ArabicStemFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("asciifolding", true, ASCIIFoldingFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("bengali_normalization", true, BengaliNormalizationFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("brazilian_stem", false, BrazilianStemFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("cjk_bigram", false, CJKBigramFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("cjk_width", true, CJKWidthFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("classic", false, ClassicFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("common_grams", false, false, input -> new CommonGramsFilter(input, CharArraySet.EMPTY_SET)));
    filters.add(PreConfiguredTokenFilter.singleton("czech_stem", false, CzechStemFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("decimal_digit", true, DecimalDigitFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("delimited_payload", false, input -> new DelimitedPayloadTokenFilter(input, DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER)));
    filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer())));
    filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1)));
    filters.add(PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)));
    filters.add(PreConfiguredTokenFilter.singleton("french_stem", false, input -> new SnowballFilter(input, new FrenchStemmer())));
    filters.add(PreConfiguredTokenFilter.singleton("german_normalization", true, GermanNormalizationFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("german_stem", false, GermanStemFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("hindi_normalization", true, HindiNormalizationFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("indic_normalization", true, IndicNormalizationFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("keyword_repeat", false, false, KeywordRepeatFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("kstem", false, KStemFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("length", false, input -> new LengthFilter(input, 0, Integer.MAX_VALUE)));
    filters.add(PreConfiguredTokenFilter.singleton("limit", false, input -> new LimitTokenCountFilter(input, LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS)));
    filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false)));
    filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("reverse", false, ReverseStringFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("russian_stem", false, input -> new SnowballFilter(input, "Russian")));
    filters.add(PreConfiguredTokenFilter.singleton("scandinavian_folding", true, ScandinavianFoldingFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new));
    // "shingle" marks its output with DisableGraphAttribute before returning the stream.
    filters.add(PreConfiguredTokenFilter.singleton("shingle", false, false, input -> {
        TokenStream ts = new ShingleFilter(input);
        ts.addAttribute(DisableGraphAttribute.class);
        return ts;
    }));
    filters.add(PreConfiguredTokenFilter.singleton("snowball", false, input -> new SnowballFilter(input, "English")));
    filters.add(PreConfiguredTokenFilter.singleton("sorani_normalization", true, SoraniNormalizationFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("stemmer", false, PorterStemFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("stop", false, input -> new StopFilter(input, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET)));
    filters.add(PreConfiguredTokenFilter.singleton("trim", true, TrimFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("truncate", false, input -> new TruncateTokenFilter(input, 10)));
    filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("unique", false, UniqueTokenFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("uppercase", true, UpperCaseFilter::new));
    filters.add(PreConfiguredTokenFilter.singleton("word_delimiter", false, false, input -> new WordDelimiterFilter(input, WordDelimiterFilter.GENERATE_WORD_PARTS | WordDelimiterFilter.GENERATE_NUMBER_PARTS | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterFilter.SPLIT_ON_NUMERICS | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null)));
    // "word_delimiter_graph" is version-dependent: offsets are adjusted only for indices
    // created on or after 7.3.0.
    filters.add(PreConfiguredTokenFilter.indexVersion("word_delimiter_graph", false, false, (input, version) -> {
        boolean adjustOffsets = version.onOrAfter(IndexVersions.V_7_3_0);
        return new WordDelimiterGraphFilter(input, adjustOffsets, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null);
    }));
    return filters;
}
302451.317110elasticsearch
/**
 * Verifies which data streams {@code determineAffectedDataStreams} reports for various
 * global-retention settings, given four streams: no lifecycle, lifecycle without retention,
 * 7-day retention, and 365-day retention. Branching on {@code isSystem()} accounts for
 * stream names that may randomly resolve as system streams.
 */
public void testDetermineAffectedDataStreams() {
    Metadata.Builder builder = Metadata.builder();
    DataStream dataStreamWithoutLifecycle = newDataStreamInstance("ds-no-lifecycle", List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), 1, null, false, null, List.of());
    builder.put(dataStreamWithoutLifecycle);
    String dataStreamNoRetention = "ds-no-retention";
    DataStream dataStreamWithLifecycleNoRetention = newDataStreamInstance(dataStreamNoRetention, List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), 1, null, false, DataStreamLifecycle.DEFAULT, List.of());
    builder.put(dataStreamWithLifecycleNoRetention);
    DataStream dataStreamWithLifecycleShortRetention = newDataStreamInstance("ds-no-short-retention", List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), 1, null, false, DataStreamLifecycle.newBuilder().dataRetention(TimeValue.timeValueDays(7)).build(), List.of());
    builder.put(dataStreamWithLifecycleShortRetention);
    String dataStreamLongRetention = "ds-long-retention";
    DataStream dataStreamWithLifecycleLongRetention = newDataStreamInstance(dataStreamLongRetention, List.of(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10))), 1, null, false, DataStreamLifecycle.newBuilder().dataRetention(TimeValue.timeValueDays(365)).build(), List.of());
    builder.put(dataStreamWithLifecycleLongRetention);
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build();
    {
        // Null global retention: nothing is affected.
        var affectedDataStreams = service.determineAffectedDataStreams(null, clusterState);
        assertThat(affectedDataStreams.isEmpty(), is(true));
    }
    {
        // Global retention already present in the cluster state: no effective change.
        var globalRetention = randomNonEmptyGlobalRetention();
        var clusterStateWithRetention = ClusterState.builder(clusterState).putCustom(DataStreamGlobalRetention.TYPE, globalRetention).build();
        var affectedDataStreams = service.determineAffectedDataStreams(globalRetention, clusterStateWithRetention);
        assertThat(affectedDataStreams.isEmpty(), is(true));
    }
    {
        // Default retention only (1-10 days): affects just the lifecycle-without-retention
        // stream, unless it resolves as a system stream.
        var globalRetention = new DataStreamGlobalRetention(TimeValue.timeValueDays(randomIntBetween(1, 10)), null);
        var affectedDataStreams = service.determineAffectedDataStreams(globalRetention, clusterState);
        if (dataStreamWithLifecycleNoRetention.isSystem()) {
            assertThat(affectedDataStreams.size(), is(0));
        } else {
            assertThat(affectedDataStreams.size(), is(1));
            var dataStream = affectedDataStreams.get(0);
            assertThat(dataStream.dataStreamName(), equalTo(dataStreamNoRetention));
            assertThat(dataStream.previousEffectiveRetention(), nullValue());
            assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getDefaultRetention()));
        }
    }
    {
        // Max retention only (10-90 days): caps the 365-day stream and applies to the
        // no-retention stream; each is exempt when it resolves as a system stream.
        var globalRetention = new DataStreamGlobalRetention(null, TimeValue.timeValueDays(randomIntBetween(10, 90)));
        var affectedDataStreams = service.determineAffectedDataStreams(globalRetention, clusterState);
        if (dataStreamWithLifecycleLongRetention.isSystem() && dataStreamWithLifecycleNoRetention.isSystem()) {
            assertThat(affectedDataStreams.size(), is(0));
        } else if (dataStreamWithLifecycleLongRetention.isSystem() == false && dataStreamWithLifecycleNoRetention.isSystem() == false) {
            assertThat(affectedDataStreams.size(), is(2));
            var dataStream = affectedDataStreams.get(0);
            assertThat(dataStream.dataStreamName(), equalTo(dataStreamLongRetention));
            assertThat(dataStream.previousEffectiveRetention(), notNullValue());
            assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention()));
            dataStream = affectedDataStreams.get(1);
            assertThat(dataStream.dataStreamName(), equalTo(dataStreamNoRetention));
            assertThat(dataStream.previousEffectiveRetention(), nullValue());
            assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention()));
        } else if (dataStreamWithLifecycleLongRetention.isSystem() == false) {
            assertThat(affectedDataStreams.size(), is(1));
            var dataStream = affectedDataStreams.get(0);
            assertThat(dataStream.dataStreamName(), equalTo(dataStreamLongRetention));
            assertThat(dataStream.previousEffectiveRetention(), notNullValue());
            assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention()));
        } else {
            assertThat(affectedDataStreams.size(), is(1));
            var dataStream = affectedDataStreams.get(0);
            assertThat(dataStream.dataStreamName(), equalTo(dataStreamNoRetention));
            assertThat(dataStream.previousEffectiveRetention(), nullValue());
            assertThat(dataStream.newEffectiveRetention(), equalTo(globalRetention.getMaxRetention()));
        }
    }
    {
        // Global retention identical to the factory retention: presumably treated as
        // already effective, so no stream is reported.
        DataStreamFactoryRetention factoryRetention = randomNonEmptyFactoryRetention();
        UpdateDataStreamGlobalRetentionService serviceWithRandomFactoryRetention = new UpdateDataStreamGlobalRetentionService(clusterService, new DataStreamGlobalRetentionResolver(factoryRetention));
        var globalRetention = new DataStreamGlobalRetention(factoryRetention.getDefaultRetention(), factoryRetention.getMaxRetention());
        var affectedDataStreams = serviceWithRandomFactoryRetention.determineAffectedDataStreams(globalRetention, clusterState);
        assertThat(affectedDataStreams, is(empty()));
    }
}
305010.84299elasticsearch
/**
 * Checks that a partially-qualified / unknown dotted symbol (e.g. {@code test0.test1.test2})
 * produces a "cannot resolve symbol" error in every syntactic position where it may appear:
 * assignments, arithmetic, boolean and comparison operators, indexing, method arguments,
 * conditionals, casts, instanceof, list/map literals, allocation, unary operators,
 * declarations, and every control-flow construct.
 */
public void testPartialType() {
    // Build a dotted identifier such as "test0.test1.test2" that resolves to neither a
    // variable nor a whitelisted type, so every use must fail symbol resolution.
    int dots = randomIntBetween(1, 5);
    StringBuilder builder = new StringBuilder("test0");
    for (int dot = 0; dot < dots; ++dot) {
        builder.append(".test");
        builder.append(dot + 1);
    }
    String symbol = builder.toString();
    // Every script shape exercised by this test. The previous version repeated the
    // symbol + "[0]" and "int[] x = new int[1]; x[symbol]" cases twice; the duplicates
    // are dropped here.
    String[] scripts = {
        symbol + " = 1",
        "int x; x = " + symbol,
        symbol + " += 1",
        symbol + " + 1",
        "1 + " + symbol,
        symbol + " || true",
        "true || " + symbol,
        symbol + "[0]",
        "int[] x = new int[1]; x[" + symbol + "]",
        "def x = new int[1]; x[" + symbol + "]",
        "Map x = new HashMap(); x[" + symbol + "]",
        "List x = new ArrayList(); x[" + symbol + "]",
        "List x = new ArrayList(); x.add(" + symbol + ")",
        "def x = new ArrayList(); x.add(" + symbol + ")",
        "staticAddIntsTest(" + symbol + ", 1)",
        symbol + " > true",
        "true > " + symbol,
        symbol + " ? 2 : 1",
        "boolean x = true; x ? " + symbol + " : 1",
        "boolean x = true; x ? 2 : " + symbol,
        symbol + " ?: []",
        "boolean x = true; x ?: " + symbol,
        "(int)" + symbol,
        symbol + " instanceof List",
        "[" + symbol + "]",
        "[" + symbol + " : 1]",
        "[1 : " + symbol + "]",
        "new int[" + symbol + "]",
        "new ArrayList(" + symbol + ")",
        "!" + symbol,
        "-" + symbol,
        "int x = " + symbol + ";",
        "do {int x = 1;} while (" + symbol + ");",
        "for (def x : " + symbol + ") {int x = 1;}",
        symbol,
        "for (int x = " + symbol + ";;) {int x = 1;}",
        "for (;" + symbol + ";) {int x = 1;}",
        "for (;;++" + symbol + ") {int x = 1;}",
        "if (" + symbol + ") {int x = 1;}",
        "if (" + symbol + ") {int x = 1;} else {int x = 2;}",
        "return " + symbol + ";",
        "throw " + symbol + ";",
        "while (" + symbol + ") {int x = 1;}" };
    for (String script : scripts) {
        IllegalArgumentException iae = expectScriptThrows(IllegalArgumentException.class, () -> exec(script));
        // Expected value first — the original had the assertEquals arguments reversed.
        assertEquals("cannot resolve symbol [" + symbol + "]", iae.getMessage());
    }
}
302300.133181elasticsearch
/**
 * Parses a shape object (GeoJSON-style {@code type}/{@code coordinates}/{@code geometries}
 * representation) from the current position of {@code parser} into a {@link ShapeBuilder}.
 *
 * @param parser      the content parser, positioned at the shape's START_OBJECT
 * @param shapeMapper supplies orientation / coerce / ignore-z-value settings; may be
 *                    {@code null}, in which case the defaults below are used
 * @return the builder for the parsed shape (a {@link GeometryCollectionBuilder} for
 *         geometry collections)
 * @throws IOException                 on an underlying parsing failure
 * @throws ElasticsearchParseException when the shape object is inconsistent or incomplete
 */
protected static ShapeBuilder<?, ?, ?> parse(XContentParser parser, AbstractShapeGeometryFieldMapper<?> shapeMapper) throws IOException {
    GeoShapeType shapeType = null;
    // Only meaningful for CIRCLE shapes.
    DistanceUnit.Distance radius = null;
    CoordinateNode coordinateNode = null;
    // Only meaningful for GEOMETRYCOLLECTION shapes.
    GeometryCollectionBuilder geometryCollections = null;
    // With no mapper (e.g. parsing a query body rather than a mapped field) fall back to
    // right-hand orientation, no coercion, and ignoring z-values.
    Orientation orientation = (shapeMapper == null) ? Orientation.RIGHT : shapeMapper.orientation();
    boolean coerce = shapeMapper != null && shapeMapper.coerce();
    boolean ignoreZValue = shapeMapper == null || shapeMapper.ignoreZValue();
    // Field-level inconsistencies are recorded here and thrown only after the whole object
    // has been consumed, so the parser is always left positioned past the shape object.
    String malformedException = null;
    XContentParser.Token token;
    // XContentSubParser confines reading (and skipChildren) to this one object.
    try (XContentParser subParser = new XContentSubParser(parser)) {
        while ((token = subParser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                String fieldName = subParser.currentName();
                if (ShapeParser.FIELD_TYPE.match(fieldName, subParser.getDeprecationHandler())) {
                    subParser.nextToken();
                    final GeoShapeType type = GeoShapeType.forName(subParser.text());
                    // The type may have already been implied by "geometries"/"radius";
                    // a contradictory explicit type is malformed.
                    if (shapeType != null && shapeType.equals(type) == false) {
                        malformedException = ShapeParser.FIELD_TYPE + " already parsed as [" + shapeType + "] cannot redefine as [" + type + "]";
                    } else {
                        shapeType = type;
                    }
                } else if (ShapeParser.FIELD_COORDINATES.match(fieldName, subParser.getDeprecationHandler())) {
                    subParser.nextToken();
                    CoordinateNode tempNode = parseCoordinates(subParser, ignoreZValue);
                    // Repeated "coordinates" fields must agree on dimensionality.
                    if (coordinateNode != null && tempNode.numDimensions() != coordinateNode.numDimensions()) {
                        throw new ElasticsearchParseException("Exception parsing coordinates: " + "number of dimensions do not match");
                    }
                    coordinateNode = tempNode;
                } else if (ShapeParser.FIELD_GEOMETRIES.match(fieldName, subParser.getDeprecationHandler())) {
                    // "geometries" implies (and is only valid for) GEOMETRYCOLLECTION.
                    if (shapeType == null) {
                        shapeType = GeoShapeType.GEOMETRYCOLLECTION;
                    } else if (shapeType.equals(GeoShapeType.GEOMETRYCOLLECTION) == false) {
                        malformedException = "cannot have [" + ShapeParser.FIELD_GEOMETRIES + "] with type set to [" + shapeType + "]";
                    }
                    subParser.nextToken();
                    geometryCollections = parseGeometries(subParser, shapeMapper);
                } else if (CircleBuilder.FIELD_RADIUS.match(fieldName, subParser.getDeprecationHandler())) {
                    // "radius" implies (and is only valid for) CIRCLE.
                    if (shapeType == null) {
                        shapeType = GeoShapeType.CIRCLE;
                    } else if (shapeType.equals(GeoShapeType.CIRCLE) == false) {
                        malformedException = "cannot have [" + CircleBuilder.FIELD_RADIUS + "] with type set to [" + shapeType + "]";
                    }
                    subParser.nextToken();
                    radius = DistanceUnit.Distance.parseDistance(subParser.text());
                } else if (ShapeParser.FIELD_ORIENTATION.match(fieldName, subParser.getDeprecationHandler())) {
                    // Orientation only makes sense for (multi)polygons; still consume the value.
                    if (shapeType != null && (shapeType.equals(GeoShapeType.POLYGON) || shapeType.equals(GeoShapeType.MULTIPOLYGON)) == false) {
                        malformedException = "cannot have [" + ShapeParser.FIELD_ORIENTATION + "] with type set to [" + shapeType + "]";
                    }
                    subParser.nextToken();
                    orientation = Orientation.fromString(subParser.text());
                } else {
                    // Unknown field: skip its whole value so parsing stays positioned correctly.
                    subParser.nextToken();
                    subParser.skipChildren();
                }
            }
        }
    }
    // Validate only after the object has been fully consumed (see malformedException above).
    if (malformedException != null) {
        throw new ElasticsearchParseException(malformedException);
    } else if (shapeType == null) {
        throw new ElasticsearchParseException("shape type not included");
    } else if (coordinateNode == null && GeoShapeType.GEOMETRYCOLLECTION != shapeType) {
        throw new ElasticsearchParseException("coordinates not included");
    } else if (geometryCollections == null && GeoShapeType.GEOMETRYCOLLECTION == shapeType) {
        throw new ElasticsearchParseException("geometries not included");
    } else if (radius != null && GeoShapeType.CIRCLE != shapeType) {
        throw new ElasticsearchParseException("field [{}] is supported for [{}] only", CircleBuilder.FIELD_RADIUS, CircleBuilder.TYPE);
    }
    if (shapeType.equals(GeoShapeType.GEOMETRYCOLLECTION)) {
        return geometryCollections;
    }
    return shapeType.getBuilder(coordinateNode, radius, orientation, coerce);
}
303838.511103elasticsearch
/**
 * Verifies how the query analyzer computes {@code minimumShouldMatch} and the extraction set
 * for boolean queries that mix point-range and term clauses, including duplicated clauses,
 * FILTER/MUST combinations, and nested conjunctions.
 */
public void testCombinedRangeAndTermWithMinimumShouldMatch() {
    // Range plus two identical SHOULD terms with msm=2: the duplicated term collapses,
    // so only one clause match can be guaranteed.
    Query duplicatedTermDisjunction = new BooleanQuery.Builder()
        .add(IntPoint.newRangeQuery("i", 0, 10), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v1")), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v1")), Occur.SHOULD)
        .setMinimumNumberShouldMatch(2)
        .build();
    Result result = analyze(duplicatedTermDisjunction);
    assertThat(result.minimumShouldMatch, equalTo(1));
    assertThat(result.extractions, hasSize(2));
    assertFalse(result.matchAllDocs);
    assertFalse(result.verified);
    // Adding a FILTER clause on the same term does not change the outcome.
    Query withFilterClause = new BooleanQuery.Builder()
        .add(IntPoint.newRangeQuery("i", 0, 10), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v1")), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v1")), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v1")), Occur.FILTER)
        .setMinimumNumberShouldMatch(2)
        .build();
    result = analyze(withFilterClause);
    assertThat(result.minimumShouldMatch, equalTo(1));
    assertThat(result.extractions, hasSize(2));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
    // Conjoining that query with itself dedupes back to the same extractions.
    Query selfConjunction = new BooleanQuery.Builder()
        .add(withFilterClause, Occur.MUST)
        .add(withFilterClause, Occur.MUST)
        .build();
    result = analyze(selfConjunction);
    assertThat(result.minimumShouldMatch, equalTo(1));
    assertThat(result.extractions, hasSize(2));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
    // One guaranteed SHOULD match plus a MUST term: two clauses are required.
    Query filterPlusMust = new BooleanQuery.Builder()
        .add(new TermQuery(new Term("f", "v1")), Occur.FILTER)
        .add(IntPoint.newRangeQuery("i", 15, 20), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v2")), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v2")), Occur.MUST)
        .setMinimumNumberShouldMatch(1)
        .build();
    result = analyze(filterPlusMust);
    assertThat(result.minimumShouldMatch, equalTo(2));
    assertThat(result.extractions, hasSize(3));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
    // Distinct SHOULD clauses (two ranges on different fields, one term) plus a MUST term.
    Query distinctShoulds = new BooleanQuery.Builder()
        .add(IntPoint.newRangeQuery("i", 15, 20), Occur.SHOULD)
        .add(IntPoint.newRangeQuery("i2", 15, 20), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v1")), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v2")), Occur.MUST)
        .setMinimumNumberShouldMatch(1)
        .build();
    result = analyze(distinctShoulds);
    assertThat(result.minimumShouldMatch, equalTo(2));
    assertThat(result.extractions, hasSize(4));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
    // Several ranges on the same field count as a single guaranteed match.
    Query sameFieldRanges = new BooleanQuery.Builder()
        .add(IntPoint.newRangeQuery("i", 15, 20), Occur.SHOULD)
        .add(IntPoint.newRangeQuery("i", 25, 30), Occur.SHOULD)
        .add(IntPoint.newRangeQuery("i", 35, 40), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v1")), Occur.SHOULD)
        .add(new TermQuery(new Term("f", "v2")), Occur.MUST)
        .setMinimumNumberShouldMatch(1)
        .build();
    result = analyze(sameFieldRanges);
    assertThat(result.minimumShouldMatch, equalTo(2));
    assertThat(result.extractions, hasSize(5));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
    // Nested MUST conjunction whose inner ranges share a field: only one of them is counted.
    Query nestedSameFieldRanges = new BooleanQuery.Builder()
        .add(
            new BooleanQuery.Builder()
                .add(IntPoint.newRangeQuery("i", 15, 20), Occur.MUST)
                .add(IntPoint.newRangeQuery("i", 25, 30), Occur.MUST)
                .build(),
            Occur.MUST
        )
        .add(IntPoint.newRangeQuery("i", 35, 40), Occur.MUST)
        .add(new TermQuery(new Term("f", "v2")), Occur.MUST)
        .build();
    result = analyze(nestedSameFieldRanges);
    assertThat(result.minimumShouldMatch, equalTo(2));
    assertThat(result.extractions, hasSize(4));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
    // Same shape but the inner ranges target different fields: both count.
    Query nestedDistinctFieldRanges = new BooleanQuery.Builder()
        .add(
            new BooleanQuery.Builder()
                .add(IntPoint.newRangeQuery("i", 15, 20), Occur.MUST)
                .add(IntPoint.newRangeQuery("i2", 25, 30), Occur.MUST)
                .build(),
            Occur.MUST
        )
        .add(IntPoint.newRangeQuery("i", 35, 40), Occur.MUST)
        .add(new TermQuery(new Term("f", "v2")), Occur.MUST)
        .build();
    result = analyze(nestedDistinctFieldRanges);
    assertThat(result.minimumShouldMatch, equalTo(3));
    assertThat(result.extractions, hasSize(4));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
    // Two nested conjunctions sharing one range: the shared range extraction dedupes.
    Query sharedRangeConjunctions = new BooleanQuery.Builder()
        .add(
            new BooleanQuery.Builder()
                .add(IntPoint.newRangeQuery("i", 1, 2), Occur.MUST)
                .add(new TermQuery(new Term("f", "1")), Occur.MUST)
                .build(),
            Occur.MUST
        )
        .add(
            new BooleanQuery.Builder()
                .add(IntPoint.newRangeQuery("i", 1, 2), Occur.MUST)
                .add(new TermQuery(new Term("f", "2")), Occur.MUST)
                .build(),
            Occur.MUST
        )
        .build();
    result = analyze(sharedRangeConjunctions);
    assertThat(result.minimumShouldMatch, equalTo(3));
    assertThat(result.extractions, hasSize(3));
    assertFalse(result.verified);
    assertFalse(result.matchAllDocs);
}
305106.73395elasticsearch
/**
 * Verifies that multi term vector requests honor the {@code version} parameter, both through
 * the realtime path (before a refresh) and against the search index (after a refresh), and
 * that version conflicts surface as per-item failures instead of failing the whole request.
 */
public void testMultiTermVectorsWithVersion() throws Exception {
    // Disable automatic refresh so the realtime vs. search-index paths can be exercised explicitly.
    assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSettings(Settings.builder().put("index.refresh_interval", -1)));
    ensureGreen();
    // A request for a document that was never indexed reports "not exists" instead of failing.
    MultiTermVectorsResponse response = client().prepareMultiTermVectors().add(indexOrAlias(), "1").get();
    assertThat(response.getResponses().length, equalTo(1));
    assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
    for (int i = 0; i < 3; i++) {
        prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
    }
    // Documents are at version 1 and not yet refreshed: MATCH_ANY and version 1 succeed via
    // the realtime path, version 2 is a conflict.
    response = versionedTermVectors("1", false);
    assertThat(response.getResponses().length, equalTo(3));
    assertTermVectorsExist(response, 0, "1", "value1");
    assertTermVectorsExist(response, 1, "1", "value1");
    assertVersionConflict(response, 2, "1");
    refresh();
    // Same expectations when reading from the now-refreshed search index with realtime disabled.
    response = versionedTermVectors("1", true);
    assertThat(response.getResponses().length, equalTo(3));
    assertTermVectorsExist(response, 0, "1", "value1");
    assertTermVectorsExist(response, 1, "1", "value1");
    assertVersionConflict(response, 2, "1");
    // Re-index the same documents, bumping them to version 2: now version 1 conflicts and
    // version 2 succeeds (realtime path, changes not yet refreshed).
    for (int i = 0; i < 3; i++) {
        prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value" + i).get();
    }
    response = versionedTermVectors("2", false);
    assertThat(response.getResponses().length, equalTo(3));
    assertTermVectorsExist(response, 0, "2", "value2");
    assertVersionConflict(response, 1, "2");
    assertTermVectorsExist(response, 2, "2", "value2");
    refresh();
    // And the same outcome after the version-2 documents become visible to search.
    response = versionedTermVectors("2", false);
    assertThat(response.getResponses().length, equalTo(3));
    assertTermVectorsExist(response, 0, "2", "value2");
    assertVersionConflict(response, 1, "2");
    assertTermVectorsExist(response, 2, "2", "value2");
}

/**
 * Issues a multi term vectors request for {@code id} at versions MATCH_ANY, 1 and 2 (in that
 * order), optionally disabling the realtime path on every sub-request.
 */
private MultiTermVectorsResponse versionedTermVectors(String id, boolean nonRealtime) {
    TermVectorsRequest matchAny = new TermVectorsRequest(indexOrAlias(), id).selectedFields("field").version(Versions.MATCH_ANY);
    TermVectorsRequest version1 = new TermVectorsRequest(indexOrAlias(), id).selectedFields("field").version(1);
    TermVectorsRequest version2 = new TermVectorsRequest(indexOrAlias(), id).selectedFields("field").version(2);
    if (nonRealtime) {
        matchAny.realtime(false);
        version1.realtime(false);
        version2.realtime(false);
    }
    return client().prepareMultiTermVectors().add(matchAny).add(version1).add(version2).get();
}

/**
 * Asserts that item {@code slot} of {@code response} succeeded for {@code id} against index
 * "test" and carries exactly {@code term} in the "field" field. The index assertion is now
 * applied uniformly (the original asserted it only for some items).
 */
private void assertTermVectorsExist(MultiTermVectorsResponse response, int slot, String id, String term) throws Exception {
    assertThat(response.getResponses()[slot].getFailure(), nullValue());
    assertThat(response.getResponses()[slot].getId(), equalTo(id));
    assertThat(response.getResponses()[slot].getIndex(), equalTo("test"));
    assertThat(response.getResponses()[slot].getResponse().isExists(), equalTo(true));
    checkTermTexts(response.getResponses()[slot].getResponse().getFields().terms("field"), new String[] { term });
}

/** Asserts that item {@code slot} of {@code response} failed for {@code id} with a version conflict. */
private void assertVersionConflict(MultiTermVectorsResponse response, int slot, String id) {
    assertThat(response.getResponses()[slot].getFailure(), notNullValue());
    assertThat(response.getResponses()[slot].getFailure().getId(), equalTo(id));
    assertThat(response.getResponses()[slot].getIndex(), equalTo("test"));
    assertThat(response.getResponses()[slot].getFailure().getCause(), instanceOf(ElasticsearchException.class));
    assertThat(response.getResponses()[slot].getFailure().getCause().getCause(), instanceOf(VersionConflictEngineException.class));
}
301698.9211120elasticsearch
/**
 * Verifies that a file-based peer recovery after a replica restart reuses the segment files
 * the replica already has on disk, and only copies the segments created while it was down.
 * The primary keeps all its files (local recovery), while the replica must show a mix of
 * reused and newly-recovered files and no replayed translog operations.
 */
public void testReuseInFileBasedPeerRecovery() throws Exception {
    internalCluster().startMasterOnlyNode();
    final String primaryNode = internalCluster().startDataOnlyNode(nodeSettings(0, Settings.EMPTY));
    // Merging is disabled so segment files stay stable across the restart; the short retention
    // lease sync interval lets the lease expiry below take effect quickly.
    client(primaryNode).admin().indices().prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 1).put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")).get();
    logger.info("--> indexing docs");
    int numDocs = randomIntBetween(1, 1024);
    for (int i = 0; i < numDocs; i++) {
        client(primaryNode).prepareIndex("test").setSource("field", "value").get();
    }
    // Force-flush so the initial docs live in committed segment files the replica will copy.
    client(primaryNode).admin().indices().prepareFlush("test").setForce(true).get();
    final String replicaNode = internalCluster().startDataOnlyNode(nodeSettings(1, Settings.EMPTY));
    ensureGreen();
    // Record the file names the replica received in its initial recovery; after the restart,
    // anything in this set should be reused rather than re-copied.
    final RecoveryResponse initialRecoveryReponse = indicesAdmin().prepareRecoveries("test").get();
    final Set<String> files = new HashSet<>();
    for (final RecoveryState recoveryState : initialRecoveryReponse.shardRecoveryStates().get("test")) {
        if (recoveryState.getTargetNode().getName().equals(replicaNode)) {
            for (final RecoveryState.FileDetail file : recoveryState.getIndex().fileDetails()) {
                files.add(file.name());
            }
            break;
        }
    }
    logger.info("--> restart replica node");
    boolean softDeleteEnabled = internalCluster().getInstance(IndicesService.class, primaryNode).indexServiceSafe(resolveIndex("test")).getShard(0).indexSettings().isSoftDeleteEnabled();
    int moreDocs = randomIntBetween(1, 1024);
    internalCluster().restartNode(replicaNode, new RestartCallback() {

        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            // Index more docs while the replica is down so it will need a file-based
            // (not ops-based) recovery for the new segments.
            for (int i = 0; i < moreDocs; i++) {
                client(primaryNode).prepareIndex("test").setSource("field", "value").get();
            }
            // Drop soft-delete retention so the primary cannot serve the missed operations,
            // forcing the file-based recovery path.
            client(primaryNode).admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0).put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "0s")).get();
            // Wait until only the primary's own retention lease remains (the replica's expired).
            assertBusy(() -> assertThat(indicesAdmin().prepareStats("test").get().getShards()[0].getRetentionLeaseStats().retentionLeases().leases().size(), equalTo(1)));
            indicesAdmin().prepareFlush("test").setForce(true).get();
            // NOTE(review): this second flush runs in addition to the unconditional one above,
            // so with soft deletes enabled the index is force-flushed twice — presumably to
            // commit a safe commit that trims soft-deleted ops; confirm whether the duplicate
            // is intentional.
            if (softDeleteEnabled) {
                indicesAdmin().prepareFlush("test").setForce(true).get();
            }
            return super.onNodeStopped(nodeName);
        }
    });
    ensureGreen();
    final RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries("test").get();
    for (final RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
        // Partition this recovery's files into those the replica already had (reused)
        // and those that had to be copied over (recovered).
        long recovered = 0;
        long reused = 0;
        int filesRecovered = 0;
        int filesReused = 0;
        for (final RecoveryState.FileDetail file : recoveryState.getIndex().fileDetails()) {
            if (files.contains(file.name()) == false) {
                recovered += file.length();
                filesRecovered++;
            } else {
                reused += file.length();
                filesReused++;
            }
        }
        if (recoveryState.getPrimary()) {
            // The primary never restarted: everything is reused, nothing is copied.
            assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0L));
            assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes()));
            assertThat(recoveryState.getIndex().recoveredFileCount(), equalTo(0));
            assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount()));
        } else {
            logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", recoveryState.getShardId().getId(), recoveryState.getSourceNode().getName(), recoveryState.getTargetNode().getName(), recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
            assertThat("bytes should have been recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered));
            assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0L));
            assertThat("all existing files should be reused, byte count mismatch", recoveryState.getIndex().reusedBytes(), equalTo(reused));
            assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes() - recovered));
            assertThat("the segment from the last round of indexing should be recovered", recoveryState.getIndex().recoveredFileCount(), equalTo(filesRecovered));
            assertThat("all existing files should be reused, file count mismatch", recoveryState.getIndex().reusedFileCount(), equalTo(filesReused));
            assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount() - filesRecovered));
            assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0));
            assertThat("no translog ops should be recovered", recoveryState.getTranslog().recoveredOperations(), equalTo(0));
        }
    }
}
302965.841114elasticsearch
/**
 * Verifies that {@code IndexNameExpressionResolver.resolveSearchRouting} correctly combines
 * request-level routing values with alias-level routing values, including routing values
 * that deliberately contain leading/trailing whitespace ("tw ", " ltw ", " lw").
 *
 * Fix vs. previous revision: removed an exact duplicate of the
 * {@code resolveSearchRouting(state, null, "alias10")} assertion and split the
 * single-line alias setup chain for readability.
 */
public void testResolveSearchRouting() {
    createIndex("test1");
    createIndex("test2");
    createIndex("test3");
    clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
    // One alias with no routing, several aliases with plain routing values, one alias name
    // ("alias0") shared by two indices, and three aliases with whitespace-bearing routings.
    indicesAdmin().prepareAliases()
        .addAliasAction(AliasActions.add().index("test1").alias("alias"))
        .addAliasAction(AliasActions.add().index("test1").alias("alias10").routing("0"))
        .addAliasAction(AliasActions.add().index("test2").alias("alias20").routing("0"))
        .addAliasAction(AliasActions.add().index("test2").alias("alias21").routing("1"))
        .addAliasAction(AliasActions.add().index("test1").alias("alias0").routing("0"))
        .addAliasAction(AliasActions.add().index("test2").alias("alias0").routing("0"))
        .addAliasAction(AliasActions.add().index("test3").alias("alias3tw").routing("tw "))
        .addAliasAction(AliasActions.add().index("test3").alias("alias3ltw").routing(" ltw "))
        .addAliasAction(AliasActions.add().index("test3").alias("alias3lw").routing(" lw"))
        .get();
    ClusterState state = clusterService().state();
    IndexNameExpressionResolver indexNameExpressionResolver = internalCluster().getInstance(IndexNameExpressionResolver.class);
    // An alias without routing contributes none; request routing is passed through as-is.
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias"), nullValue());
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1", "alias"), equalTo(newMap("test1", newSet("0", "1"))));
    // Alias routing applies on its own and is intersected with any request routing.
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias10"), equalTo(newMap("test1", newSet("0"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0", "alias10"), equalTo(newMap("test1", newSet("0"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "1", "alias10"), nullValue());
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias0"), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
    // Routing from multiple aliases is merged per target index.
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[] { "alias10", "alias20" }), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[] { "alias10", "alias21" }), equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[] { "alias20", "alias21" }), equalTo(newMap("test2", newSet("0", "1"))));
    // Mixing a concrete index (no routing) with a routed alias yields no routing restriction.
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[] { "test1", "alias10" }), nullValue());
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[] { "alias10", "test1" }), nullValue());
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0", new String[] { "alias10", "alias20" }), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1", new String[] { "alias10", "alias20" }), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "1", new String[] { "alias10", "alias20" }), nullValue());
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0", new String[] { "alias10", "alias21" }), equalTo(newMap("test1", newSet("0"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "1", new String[] { "alias10", "alias21" }), equalTo(newMap("test2", newSet("1"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1,2", new String[] { "alias10", "alias21" }), equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1,2", new String[] { "test1", "alias10", "alias21" }), equalTo(newMap("test1", newSet("0", "1", "2"), "test2", newSet("1"))));
    // Whitespace inside routing values must be preserved exactly.
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "test1"), equalTo(newMap("test1", newSet("tw ", " ltw ", " lw"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "alias3tw"), equalTo(newMap("test3", newSet("tw "))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "alias3ltw"), equalTo(newMap("test3", newSet(" ltw "))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "alias3lw"), equalTo(newMap("test3", newSet(" lw"))));
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,tw , ltw , lw", "test1", "alias3ltw"), equalTo(newMap("test1", newSet("0", "tw ", " ltw ", " lw"), "test3", newSet(" ltw "))));
    // Resolving against all indices (null expressions) fans the request routing out to every index.
    assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1,2,tw , ltw , lw", (String[]) null), equalTo(newMap("test1", newSet("0", "1", "2", "tw ", " ltw ", " lw"), "test2", newSet("0", "1", "2", "tw ", " ltw ", " lw"), "test3", newSet("0", "1", "2", "tw ", " ltw ", " lw"))));
    assertThat(IndexNameExpressionResolver.resolveSearchRoutingAllIndices(state.metadata(), "0,1,2,tw , ltw , lw"), equalTo(newMap("test1", newSet("0", "1", "2", "tw ", " ltw ", " lw"), "test2", newSet("0", "1", "2", "tw ", " ltw ", " lw"), "test3", newSet("0", "1", "2", "tw ", " ltw ", " lw"))));
}
302264.392391elasticsearch
/**
 * Builds a {@link GetResult} for a document that has already been located in the engine,
 * loading the requested stored fields and (when enabled) the document source.
 *
 * @param id                   the document id, used here only for error reporting
 * @param storedFields         stored field names to return, or null for none
 * @param fetchSourceContext   controls whether the source is fetched and how it is filtered
 * @param get                  engine-level lookup result; must report the document as existing
 * @param forceSyntheticSource when true, always rebuild the source synthetically instead of
 *                             using the mapping's configured source loader
 * @return the assembled result with document fields, metadata fields and optional source bytes
 * @throws IOException if reading from the underlying index fails
 */
private GetResult innerGetFetch(String id, String[] storedFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, boolean forceSyntheticSource) throws IOException {
    assert get.exists() : "method should only be called if document could be retrieved";
    MappingLookup mappingLookup = mapperService.mappingLookup();
    // Reject requests that name an object (non-leaf) mapper; names with no mapper at all
    // pass through here and are simply skipped when collecting values below.
    if (storedFields != null) {
        for (String field : storedFields) {
            Mapper fieldMapper = mappingLookup.getMapper(field);
            if (fieldMapper == null) {
                if (mappingLookup.objectMappers().get(field) != null) {
                    throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
                }
            }
        }
    }
    Map<String, DocumentField> documentFields = null;
    Map<String, DocumentField> metadataFields = null;
    DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
    // Pick the source loader: forced synthetic source, or whatever the mapping provides.
    SourceLoader loader = forceSyntheticSource ? new SourceLoader.Synthetic(mappingLookup.getMapping(), mapperMetrics.sourceFieldMetrics()) : mappingLookup.newSourceLoader(mapperMetrics.sourceFieldMetrics());
    StoredFieldLoader storedFieldLoader = buildStoredFieldLoader(storedFields, fetchSourceContext, loader);
    LeafStoredFieldLoader leafStoredFieldLoader = storedFieldLoader.getLoader(docIdAndVersion.reader.getContext(), null);
    // Position the loader on the target doc; wrap low-level failures with the document id.
    try {
        leafStoredFieldLoader.advanceTo(docIdAndVersion.docId);
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to get id [" + id + "]", e);
    }
    final IndexVersion indexVersion = indexSettings.getIndexVersionCreated();
    if (leafStoredFieldLoader.storedFields().isEmpty() == false) {
        // Only keep the fields the caller asked for, plus the routing field which is
        // always returned.
        Set<String> needed = new HashSet<>();
        if (storedFields != null) {
            Collections.addAll(needed, storedFields);
        }
        needed.add(RoutingFieldMapper.NAME);
        documentFields = new HashMap<>();
        metadataFields = new HashMap<>();
        for (Map.Entry<String, List<Object>> entry : leafStoredFieldLoader.storedFields().entrySet()) {
            if (false == needed.contains(entry.getKey())) {
                continue;
            }
            // On new-enough indices _ignored lives in doc values; it is loaded separately below.
            if (IgnoredFieldMapper.NAME.equals(entry.getKey()) && indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) {
                continue;
            }
            MappedFieldType ft = mapperService.fieldType(entry.getKey());
            if (ft == null) {
                // Unmapped stored field: skip rather than fail.
                continue;
            }
            List<Object> values = entry.getValue().stream().map(ft::valueForDisplay).toList();
            // Split results into metadata fields vs. regular document fields.
            if (mapperService.isMetadataField(entry.getKey())) {
                metadataFields.put(entry.getKey(), new DocumentField(entry.getKey(), values));
            } else {
                documentFields.put(entry.getKey(), new DocumentField(entry.getKey(), values));
            }
        }
    }
    // Load _ignored from doc values when it was requested and the index stores it there.
    if (indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD) && storedFields != null && Arrays.asList(storedFields).contains(IgnoredFieldMapper.NAME)) {
        final DocumentField ignoredDocumentField = loadIgnoredMetadataField(docIdAndVersion);
        if (ignoredDocumentField != null) {
            if (metadataFields == null) {
                metadataFields = new HashMap<>();
            }
            metadataFields.put(IgnoredFieldMapper.NAME, ignoredDocumentField);
        }
    }
    BytesReference sourceBytes = null;
    // Fetch and (optionally) filter the source only when both the mapping and the request allow it.
    if (mapperService.mappingLookup().isSourceEnabled() && fetchSourceContext.fetchSource()) {
        Source source = loader.leaf(docIdAndVersion.reader, new int[] { docIdAndVersion.docId }).source(leafStoredFieldLoader, docIdAndVersion.docId);
        if (fetchSourceContext.hasFilter()) {
            source = source.filter(fetchSourceContext.filter());
        }
        sourceBytes = source.internalSourceRef();
    }
    return new GetResult(shardId.getIndexName(), id, get.docIdAndVersion().seqNo, get.docIdAndVersion().primaryTerm, get.version(), get.exists(), sourceBytes, documentFields, metadataFields);
}
302709.751299elasticsearch
/**
 * Runs the phrase suggester: builds one candidate generator per configured field, asks the
 * noisy-channel spell checker for corrections, optionally validates each correction with a
 * collate query, and assembles the {@link PhraseSuggestion} response.
 *
 * @param name       name of the suggestion in the response
 * @param suggestion the parsed suggestion context (generators, scorer model, collate script, ...)
 * @param searcher   searcher over the shard's index
 * @param spare      reusable chars buffer for UTF-8 to string conversion
 * @return the suggestion response; contains a single empty entry when no corrections are possible
 * @throws IOException if reading terms or running the collate query fails
 */
public Suggestion<? extends Entry<? extends Option>> innerExecute(String name, PhraseSuggestionContext suggestion, IndexSearcher searcher, CharsRefBuilder spare) throws IOException {
    double realWordErrorLikelihood = suggestion.realworldErrorLikelihood();
    final PhraseSuggestion response = new PhraseSuggestion(name, suggestion.getSize());
    final IndexReader indexReader = searcher.getIndexReader();
    List<PhraseSuggestionContext.DirectCandidateGenerator> generators = suggestion.generators();
    final int numGenerators = generators.size();
    final List<CandidateGenerator> gens = new ArrayList<>(generators.size());
    // One direct candidate generator per configured field; fields with no terms in this
    // reader are skipped entirely.
    for (int i = 0; i < numGenerators; i++) {
        PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i);
        DirectSpellChecker directSpellChecker = generator.createDirectSpellChecker();
        Terms terms = MultiTerms.getTerms(indexReader, generator.field());
        if (terms != null) {
            gens.add(new DirectCandidateGenerator(directSpellChecker, generator.field(), generator.suggestMode(), indexReader, realWordErrorLikelihood, generator.size(), generator.preFilter(), generator.postFilter(), terms));
        }
    }
    final String suggestField = suggestion.getField();
    final Terms suggestTerms = MultiTerms.getTerms(indexReader, suggestField);
    // Only attempt corrections when we have at least one usable generator and the suggest
    // field actually has terms; otherwise return a single empty entry below.
    if (gens.size() > 0 && suggestTerms != null) {
        final NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit());
        final BytesRef separator = suggestion.separator();
        WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood, separator);
        Result checkerResult;
        // Analyze the input text and collect candidate corrections from all generators.
        try (TokenStream stream = tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField())) {
            checkerResult = checker.getCorrections(stream, new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), gens.toArray(new CandidateGenerator[gens.size()])), suggestion.maxErrors(), suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize());
        }
        PhraseSuggestion.Entry resultEntry = buildResultEntry(suggestion, spare, checkerResult.cutoffScore);
        response.addTerm(resultEntry);
        final BytesRefBuilder byteSpare = new BytesRefBuilder();
        final TemplateScript.Factory scriptFactory = suggestion.getCollateQueryScript();
        final boolean collatePrune = (scriptFactory != null) && suggestion.collatePrune();
        for (int i = 0; i < checkerResult.corrections.length; i++) {
            Correction correction = checkerResult.corrections[i];
            spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, null, null));
            boolean collateMatch = true;
            if (scriptFactory != null) {
                // Render the collate query template with this correction and check whether
                // any document matches it.
                final Map<String, Object> vars = suggestion.getCollateScriptParams();
                vars.put(SUGGESTION_TEMPLATE_VAR_NAME, spare.toString());
                SearchExecutionContext searchExecutionContext = suggestion.getSearchExecutionContext();
                final String querySource = scriptFactory.newInstance(vars).execute();
                try (XContentParser parser = XContentFactory.xContent(querySource).createParser(searchExecutionContext.getParserConfig(), querySource)) {
                    QueryBuilder innerQueryBuilder = AbstractQueryBuilder.parseTopLevelQuery(parser);
                    final ParsedQuery parsedQuery = searchExecutionContext.toQuery(innerQueryBuilder);
                    collateMatch = Lucene.exists(searcher, parsedQuery.query());
                }
            }
            // Without pruning, non-matching corrections are dropped; with pruning enabled
            // they are kept and flagged via the collateMatch option attribute instead.
            if (collateMatch == false && collatePrune == false) {
                continue;
            }
            Text phrase = new Text(spare.toString());
            Text highlighted = null;
            if (suggestion.getPreTag() != null) {
                // Build the highlighted variant by re-joining with pre/post tags.
                spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()));
                highlighted = new Text(spare.toString());
            }
            if (collatePrune) {
                resultEntry.addOption(new PhraseSuggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch));
            } else {
                resultEntry.addOption(new PhraseSuggestion.Entry.Option(phrase, highlighted, (float) (correction.score)));
            }
        }
    } else {
        // No usable generators or no terms for the suggest field: empty entry with a
        // minimal cutoff score.
        response.addTerm(buildResultEntry(suggestion, spare, Double.MIN_VALUE));
    }
    return response;
}
303262.821111elasticsearch
/**
 * Verifies that {@code TransportSearchAction.getRemoteShardsIterator} converts per-cluster
 * {@link SearchShardsResponse}s (both the modern form and one converted from the legacy
 * {@code ClusterSearchShardsResponse}) into correctly-ordered {@link SearchShardIterator}s,
 * preserving prefiltered/skip flags, cluster aliases, original indices, and node order.
 *
 * Fix vs. previous revision: several {@code assertEquals} calls had expected and actual
 * arguments swapped (JUnit's contract is {@code assertEquals(expected, actual)}), which
 * produced misleading failure messages; the argument order is now consistent throughout.
 */
public void testProcessRemoteShards() {
    Map<String, SearchShardsResponse> searchShardsResponseMap = new LinkedHashMap<>();
    {
        // Cluster 1: modern SearchShardsResponse with three groups, one of them skippable.
        List<DiscoveryNode> nodes = List.of(DiscoveryNodeUtils.create("node1"), DiscoveryNodeUtils.create("node2"));
        Map<String, AliasFilter> aliasFilters1 = Map.of("foo_id", AliasFilter.of(new TermsQueryBuilder("foo", "bar"), "some_alias_for_foo", "some_other_foo_alias"), "bar_id", AliasFilter.of(new MatchAllQueryBuilder(), Strings.EMPTY_ARRAY));
        List<SearchShardsGroup> groups = List.of(new SearchShardsGroup(new ShardId("foo", "foo_id", 0), List.of("node1", "node2"), false), new SearchShardsGroup(new ShardId("foo", "foo_id", 1), List.of("node2", "node1"), true), new SearchShardsGroup(new ShardId("bar", "bar_id", 0), List.of("node2", "node1"), false));
        searchShardsResponseMap.put("test_cluster_1", new SearchShardsResponse(groups, nodes, aliasFilters1));
    }
    {
        // Cluster 2: response converted from the legacy ClusterSearchShardsResponse shape.
        DiscoveryNode[] nodes2 = new DiscoveryNode[] { DiscoveryNodeUtils.create("node3") };
        ClusterSearchShardsGroup[] groups2 = new ClusterSearchShardsGroup[] { new ClusterSearchShardsGroup(new ShardId("xyz", "xyz_id", 0), new ShardRouting[] { TestShardRouting.newShardRouting("xyz", 0, "node3", true, ShardRoutingState.STARTED) }) };
        Map<String, AliasFilter> aliasFilters2 = Map.of("xyz", AliasFilter.of(null, "some_alias_for_xyz"));
        searchShardsResponseMap.put("test_cluster_2", SearchShardsResponse.fromLegacyResponse(new ClusterSearchShardsResponse(groups2, nodes2, aliasFilters2)));
    }
    Map<String, OriginalIndices> remoteIndicesByCluster = Map.of("test_cluster_1", new OriginalIndices(new String[] { "fo*", "ba*" }, SearchRequest.DEFAULT_INDICES_OPTIONS), "test_cluster_2", new OriginalIndices(new String[] { "x*" }, SearchRequest.DEFAULT_INDICES_OPTIONS));
    Map<String, AliasFilter> aliasFilters = new HashMap<>();
    searchShardsResponseMap.values().forEach(r -> aliasFilters.putAll(r.getAliasFilters()));
    List<SearchShardIterator> iteratorList = TransportSearchAction.getRemoteShardsIterator(searchShardsResponseMap, remoteIndicesByCluster, aliasFilters);
    assertThat(iteratorList, hasSize(4));
    {
        // foo shard 0: prefiltered, not skipped, aliases exposed as original indices.
        SearchShardIterator shardIt = iteratorList.get(0);
        assertTrue(shardIt.prefiltered());
        assertFalse(shardIt.skip());
        assertThat(shardIt.shardId(), equalTo(new ShardId("foo", "foo_id", 0)));
        assertArrayEquals(new String[] { "some_alias_for_foo", "some_other_foo_alias" }, shardIt.getOriginalIndices().indices());
        assertEquals("test_cluster_1", shardIt.getClusterAlias());
        assertEquals("foo", shardIt.shardId().getIndexName());
        SearchShardTarget shard = shardIt.nextOrNull();
        assertNotNull(shard);
        assertEquals("foo", shard.getShardId().getIndexName());
        assertThat(shard.getNodeId(), equalTo("node1"));
        shard = shardIt.nextOrNull();
        assertNotNull(shard);
        assertEquals("foo", shard.getShardId().getIndexName());
        assertThat(shard.getNodeId(), equalTo("node2"));
        assertNull(shardIt.nextOrNull());
    }
    {
        // foo shard 1: the skippable group; node order (node2, node1) must be preserved.
        SearchShardIterator shardIt = iteratorList.get(1);
        assertTrue(shardIt.prefiltered());
        assertTrue(shardIt.skip());
        assertThat(shardIt.shardId(), equalTo(new ShardId("foo", "foo_id", 1)));
        assertArrayEquals(new String[] { "some_alias_for_foo", "some_other_foo_alias" }, shardIt.getOriginalIndices().indices());
        assertEquals("test_cluster_1", shardIt.getClusterAlias());
        assertEquals("foo", shardIt.shardId().getIndexName());
        SearchShardTarget shard = shardIt.nextOrNull();
        assertNotNull(shard);
        assertEquals("foo", shard.getShardId().getIndexName());
        assertThat(shard.getNodeId(), equalTo("node2"));
        shard = shardIt.nextOrNull();
        assertNotNull(shard);
        assertEquals("foo", shard.getShardId().getIndexName());
        assertThat(shard.getNodeId(), equalTo("node1"));
        assertNull(shardIt.nextOrNull());
    }
    {
        // bar shard 0: alias filter has no alias names, so the index name itself is used.
        SearchShardIterator shardIt = iteratorList.get(2);
        assertTrue(shardIt.prefiltered());
        assertFalse(shardIt.skip());
        assertThat(shardIt.shardId(), equalTo(new ShardId("bar", "bar_id", 0)));
        assertArrayEquals(new String[] { "bar" }, shardIt.getOriginalIndices().indices());
        assertEquals("test_cluster_1", shardIt.getClusterAlias());
        SearchShardTarget shard = shardIt.nextOrNull();
        assertNotNull(shard);
        assertEquals("bar", shard.getShardId().getIndexName());
        assertThat(shard.getNodeId(), equalTo("node2"));
        shard = shardIt.nextOrNull();
        assertNotNull(shard);
        assertEquals("bar", shard.getShardId().getIndexName());
        assertThat(shard.getNodeId(), equalTo("node1"));
        assertNull(shardIt.nextOrNull());
    }
    {
        // xyz shard 0: from the legacy response, hence not prefiltered.
        SearchShardIterator shardIt = iteratorList.get(3);
        assertFalse(shardIt.prefiltered());
        assertFalse(shardIt.skip());
        assertArrayEquals(new String[] { "some_alias_for_xyz" }, shardIt.getOriginalIndices().indices());
        assertThat(shardIt.shardId(), equalTo(new ShardId("xyz", "xyz_id", 0)));
        assertEquals("test_cluster_2", shardIt.getClusterAlias());
        SearchShardTarget shard = shardIt.nextOrNull();
        assertNotNull(shard);
        assertEquals("xyz", shard.getShardId().getIndexName());
        assertThat(shard.getNodeId(), equalTo("node3"));
        assertNull(shardIt.nextOrNull());
    }
}
302664.269106elasticsearch
/**
 * Exercises concurrent join handling: fires a quorum's worth of valid join requests plus a
 * set of possibly-invalid ones at the coordinator from parallel threads (all released by a
 * single barrier), while a background thread continuously checks the coordinator's
 * invariants, then asserts the local node was elected master and every "successful" node
 * actually joined the cluster.
 */
public void testConcurrentJoining() {
    // Random topology: 2-5 master-eligible nodes plus 0-5 non-master nodes.
    List<DiscoveryNode> masterNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5)).mapToObj(nodeId -> newNode(nodeId, true)).toList();
    List<DiscoveryNode> otherNodes = IntStream.rangeClosed(masterNodes.size() + 1, masterNodes.size() + 1 + randomIntBetween(0, 5)).mapToObj(nodeId -> newNode(nodeId, false)).toList();
    List<DiscoveryNode> allNodes = Stream.concat(masterNodes.stream(), otherNodes.stream()).toList();
    DiscoveryNode localNode = masterNodes.get(0);
    // Voting configuration must not be exactly {localNode} alone.
    VotingConfiguration votingConfiguration = new VotingConfiguration(randomValueOtherThan(singletonList(localNode), () -> randomSubsetOf(randomIntBetween(1, masterNodes.size()), masterNodes)).stream().map(DiscoveryNode::getId).collect(Collectors.toSet()));
    logger.info("Voting configuration: {}", votingConfiguration);
    long initialTerm = randomLongBetween(1, 10);
    long initialVersion = randomLongBetween(1, 10);
    setupRealMasterServiceAndCoordinator(initialTerm, initialState(localNode, initialTerm, initialVersion, votingConfiguration));
    long newTerm = initialTerm + randomLongBetween(1, 10);
    // Pick a random subset of nodes that still forms a quorum; their joins must all succeed.
    List<DiscoveryNode> successfulNodes;
    do {
        successfulNodes = randomSubsetOf(allNodes);
    } while (votingConfiguration.hasQuorum(successfulNodes.stream().map(DiscoveryNode::getId).toList()) == false);
    logger.info("Successful voting nodes: {}", successfulNodes);
    List<JoinRequest> correctJoinRequests = successfulNodes.stream().map(node -> new JoinRequest(node, CompatibilityVersionsUtils.staticCurrent(), Set.of(), newTerm, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion)))).toList();
    List<DiscoveryNode> possiblyUnsuccessfulNodes = new ArrayList<>(allNodes);
    possiblyUnsuccessfulNodes.removeAll(successfulNodes);
    logger.info("Possibly unsuccessful voting nodes: {}", possiblyUnsuccessfulNodes);
    // Requests from the remaining nodes are randomly valid, stale-term, or wrong-version;
    // these may legitimately be rejected by the coordinator.
    List<JoinRequest> possiblyFailingJoinRequests = possiblyUnsuccessfulNodes.stream().map(node -> {
        if (randomBoolean()) {
            return new JoinRequest(node, CompatibilityVersionsUtils.staticCurrent(), Set.of(), newTerm, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion)));
        } else if (randomBoolean()) {
            return new JoinRequest(node, CompatibilityVersionsUtils.staticCurrent(), Set.of(), newTerm, Optional.of(new Join(node, localNode, randomLongBetween(0, initialTerm), initialTerm, initialVersion)));
        } else {
            return new JoinRequest(node, CompatibilityVersionsUtils.staticCurrent(), Set.of(), newTerm, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion + randomLongBetween(1, 10))));
        }
    }).collect(Collectors.toCollection(ArrayList::new));
    // Duplicate some requests to also cover repeated joins from the same node.
    possiblyFailingJoinRequests.addAll(randomSubsetOf(possiblyFailingJoinRequests));
    // Barrier releases every join thread plus the invariant-assertion thread at once.
    final CyclicBarrier barrier = new CyclicBarrier(correctJoinRequests.size() + possiblyFailingJoinRequests.size() + 1);
    final AtomicBoolean stopAsserting = new AtomicBoolean();
    final Thread assertionThread = new Thread(() -> {
        safeAwait(barrier);
        // Continuously re-check coordinator invariants while joins are racing.
        while (stopAsserting.get() == false) {
            coordinator.invariant();
        }
    }, "assert invariants");
    final List<Thread> joinThreads = Stream.concat(correctJoinRequests.stream().map(joinRequest -> new Thread(() -> {
        safeAwait(barrier);
        joinNode(joinRequest);
    }, "process " + joinRequest)), possiblyFailingJoinRequests.stream().map(joinRequest -> new Thread(() -> {
        safeAwait(barrier);
        try {
            joinNode(joinRequest);
        } catch (CoordinationStateRejectedException e) {
            // Expected: these requests may carry a stale term or mismatched version.
        }
    }, "process " + joinRequest))).toList();
    assertionThread.start();
    joinThreads.forEach(Thread::start);
    joinThreads.forEach(t -> {
        try {
            t.join();
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    });
    // All joins finished; stop the invariant checker before final assertions.
    stopAsserting.set(true);
    try {
        assertionThread.join();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    assertTrue(MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster());
    for (DiscoveryNode successfulNode : successfulNodes) {
        assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode));
        assertFalse(successfulNode + " voted for master", coordinator.missingJoinVoteFrom(successfulNode));
    }
}
30559.9615171elasticsearch
/**
 * Publishes a cluster state to a set of nodes while randomly injecting serialization
 * failures (during full-state writes, diff writes, and on the simulated transport) and
 * verifies the publication machinery copes cleanly: every listener is completed, any
 * failure observed is one of the simulated ones, and the publication context can be
 * released. Leak detection is presumably provided by the MockPageCacheRecycler-backed
 * recycler / test harness — NOTE(review): confirm where the leak check actually fires.
 */
public void testSerializationFailuresDoNotLeak() throws InterruptedException {
    final ThreadPool threadPool = new TestThreadPool("test");
    try {
        threadPool.getThreadContext().markAsSystemContext();
        final boolean simulateFailures = randomBoolean();
        final Map<DiscoveryNode, TransportVersion> nodeTransports = new HashMap<>();
        final DiscoveryNode localNode = DiscoveryNodeUtils.builder("localNode").roles(Set.of(DiscoveryNodeRole.MASTER_ROLE)).build();
        final BytesRefRecycler recycler = new BytesRefRecycler(new MockPageCacheRecycler(Settings.EMPTY));
        final MockTransport mockTransport = new MockTransport() {

            // Occasionally fail a publish request: incompatible-version for diff-based
            // publications, or a plain IOException when failure simulation is enabled.
            @Nullable
            private Exception simulateException(String action, BytesTransportRequest request, DiscoveryNode node) {
                if (action.equals(PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME) && rarely()) {
                    if (isDiff(request, nodeTransports.get(node)) && randomBoolean()) {
                        return new IncompatibleClusterStateVersionException(randomNonNegativeLong(), UUIDs.randomBase64UUID(random()), randomNonNegativeLong(), UUIDs.randomBase64UUID(random()));
                    }
                    if (simulateFailures && randomBoolean()) {
                        return new IOException("simulated failure");
                    }
                }
                return null;
            }

            @Override
            protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
                final Exception exception = simulateException(action, (BytesTransportRequest) request, node);
                if (exception == null) {
                    // Successful path: ack the publish request immediately.
                    handleResponse(requestId, new PublishWithJoinResponse(new PublishResponse(randomNonNegativeLong(), randomNonNegativeLong()), Optional.empty()));
                } else {
                    handleError(requestId, new RemoteTransportException(node.getName(), node.getAddress(), action, exception));
                }
            }

            @Override
            public RecyclerBytesStreamOutput newNetworkBytesStream() {
                // Route all network buffers through the tracked recycler.
                return new RecyclerBytesStreamOutput(recycler);
            }
        };
        final TransportService transportService = mockTransport.createTransportService(Settings.EMPTY, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), Collections.emptySet());
        final PublicationTransportHandler handler = new PublicationTransportHandler(transportService, writableRegistry(), pu -> null);
        transportService.start();
        transportService.acceptIncomingRequests();
        // Ten candidate nodes, each with a random compatible version and transport version.
        final List<DiscoveryNode> allNodes = new ArrayList<>();
        while (allNodes.size() < 10) {
            var node = DiscoveryNodeUtils.builder("node-" + allNodes.size()).version(VersionUtils.randomCompatibleVersion(random(), Version.CURRENT), IndexVersions.MINIMUM_COMPATIBLE, IndexVersionUtils.randomCompatibleVersion(random())).build();
            allNodes.add(node);
            nodeTransports.put(node, TransportVersionUtils.randomVersionBetween(random(), TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current()));
        }
        // Previous and next cluster states contain random (possibly overlapping) node subsets,
        // so publication may use diffs or full states depending on the target node.
        final DiscoveryNodes.Builder prevNodes = DiscoveryNodes.builder();
        prevNodes.add(localNode);
        prevNodes.localNodeId(localNode.getId());
        randomSubsetOf(allNodes).forEach(prevNodes::add);
        final DiscoveryNodes.Builder nextNodes = DiscoveryNodes.builder();
        nextNodes.add(localNode);
        nextNodes.localNodeId(localNode.getId());
        randomSubsetOf(allNodes).forEach(nextNodes::add);
        final ClusterState prevClusterState = CoordinationStateTests.clusterState(randomLongBetween(1L, Long.MAX_VALUE - 1), randomNonNegativeLong(), prevNodes.build(), VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L);
        // The next state overrides its serialization hooks so both full-state writes and
        // diff writes can fail mid-stream after having allocated buffer space.
        final ClusterState nextClusterState = new ClusterState(randomNonNegativeLong(), UUIDs.randomBase64UUID(random()), CoordinationStateTests.clusterState(randomLongBetween(prevClusterState.term() + 1, Long.MAX_VALUE), randomNonNegativeLong(), nextNodes.build(), VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L)) {

            @Override
            public void writeTo(StreamOutput out) throws IOException {
                if (simulateFailures && rarely()) {
                    // Write something first so a leaked buffer would hold real bytes.
                    out.writeString("allocate something to detect leaks");
                    throw new IOException("simulated failure");
                } else {
                    super.writeTo(out);
                }
            }

            @Override
            public Diff<ClusterState> diff(ClusterState previousState) {
                if (simulateFailures && rarely()) {
                    return new Diff<ClusterState>() {

                        @Override
                        public ClusterState apply(ClusterState part) {
                            fail("this diff shouldn't be applied");
                            return part;
                        }

                        @Override
                        public void writeTo(StreamOutput out) throws IOException {
                            out.writeString("allocate something to detect leaks");
                            throw new IOException("simulated failure");
                        }
                    };
                } else {
                    return super.diff(previousState);
                }
            }
        };
        final PublicationTransportHandler.PublicationContext context;
        try {
            context = handler.newPublicationContext(new ClusterStatePublicationEvent(new BatchSummary(() -> "test"), prevClusterState, nextClusterState, new Task(randomNonNegativeLong(), "test", STATE_UPDATE_ACTION_NAME, "", TaskId.EMPTY_TASK_ID, emptyMap()), 0L, 0L));
        } catch (ElasticsearchException e) {
            // Eager serialization of the state failed; only acceptable when we are
            // simulating failures, and the cause must be the simulated IOException.
            assertTrue(simulateFailures);
            assertThat(e.getCause(), instanceOf(IOException.class));
            assertThat(e.getCause().getMessage(), equalTo("simulated failure"));
            return;
        }
        // Dispatch one publish request per target node concurrently; track both dispatch
        // and listener completion with separate latches.
        final CountDownLatch requestsLatch = new CountDownLatch(nextClusterState.nodes().getSize());
        final CountDownLatch responsesLatch = new CountDownLatch(nextClusterState.nodes().getSize());
        for (DiscoveryNode discoveryNode : nextClusterState.nodes()) {
            threadPool.generic().execute(() -> {
                context.sendPublishRequest(discoveryNode, new PublishRequest(nextClusterState), ActionListener.runAfter(ActionListener.wrap(r -> {
                }, e -> {
                    // Any failure must trace back to a simulated IOException.
                    assert simulateFailures : e;
                    final Throwable inner = ExceptionsHelper.unwrap(e, IOException.class);
                    assert inner instanceof IOException : e;
                    assertThat(inner.getMessage(), equalTo("simulated failure"));
                }), responsesLatch::countDown));
                requestsLatch.countDown();
            });
        }
        assertTrue(requestsLatch.await(10, TimeUnit.SECONDS));
        // Release the context only after all sends were dispatched; every listener must
        // still complete afterwards.
        context.decRef();
        assertTrue(responsesLatch.await(10, TimeUnit.SECONDS));
    } finally {
        assertTrue(ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
    }
}
305186.01188elasticsearch
/**
 * Verifies the {@code INDICES_ALL_ACTIVE} cluster-rebalance policy: with two
 * single-shard/one-replica indices on two nodes, shards are started step by step,
 * and rebalancing onto a newly added third node only happens once every primary
 * and replica of both indices is active.
 */
public void testClusterAllActive1() {
    // Rebalancing is only permitted when all shards of all indices are active.
    AllocationService strategy = createAllocationService(Settings.builder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test1").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1)).put(IndexMetadata.builder("test2").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test1")).addAsNew(metadata.index("test2")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    logger.info("start two nodes");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    // After the first reroute both primaries initialize; replicas stay unassigned
    // because a replica can only be assigned once its primary is active.
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
    }
    for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
        assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
    }
    logger.info("start all the primary shards for test1, replicas will start initializing");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
    }
    // test2 is untouched by starting test1's shards.
    for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
        assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
    }
    logger.info("start the test1 replica shards");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test1");
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
        assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
    }
    logger.info("start all the primary shards for test2, replicas will start initializing");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2");
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
        assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
    }
    logger.info("start the test2 replica shards");
    clusterState = startInitializingShardsAndReroute(strategy, clusterState, "test2");
    // Now all shards of both indices are STARTED, which satisfies INDICES_ALL_ACTIVE.
    for (int i = 0; i < clusterState.routingTable().index("test1").size(); i++) {
        assertThat(clusterState.routingTable().index("test1").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    for (int i = 0; i < clusterState.routingTable().index("test2").size(); i++) {
        assertThat(clusterState.routingTable().index("test2").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
    }
    logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    // Exactly one shard relocates to the new node; which index it belongs to is
    // allocator-dependent, so either index name is accepted.
    assertThat(routingNodes.node("node3").size(), equalTo(1));
    assertThat(routingNodes.node("node3").iterator().next().shardId().getIndex().getName(), anyOf(equalTo("test1"), equalTo("test2")));
}
303664.022106elasticsearch
/**
 * Tests the {@code index.routing.allocation.initial_recovery._id} setting: the first
 * allocation of the primary is restricted to node2 by {@link FilterAllocationDecider},
 * but after the shard has completed an allocation cycle (and its recovery source
 * changes), the initial-recovery filter no longer constrains it and node1 becomes
 * an allowed target as well.
 */
public void testFilterInitialRecovery() {
    ClusterSettings clusterSettings = createBuiltInClusterSettings();
    FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(Settings.EMPTY, clusterSettings);
    AllocationDeciders allocationDeciders = new AllocationDeciders(Arrays.asList(filterAllocationDecider, new SameShardAllocationDecider(clusterSettings), new ReplicaAfterPrimaryActiveAllocationDecider()));
    AllocationService service = new AllocationService(allocationDeciders, new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE, EmptySnapshotsInfoService.INSTANCE, TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY);
    // Restrict initial recovery of "idx" to node2 only.
    ClusterState state = createInitialClusterState(service, Settings.builder().put("index.routing.allocation.initial_recovery._id", "node2").build());
    RoutingTable routingTable = state.routingTable();
    // The primary must be initializing on node2, per the initial_recovery filter.
    assertEquals(routingTable.index("idx").shard(0).shard(0).state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).shard(0).currentNodeId(), "node2");
    // Fail the initializing primary; it becomes unassigned again.
    routingTable = service.applyFailedShards(state, List.of(new FailedShard(routingTable.index("idx").shard(0).shard(0), null, null, randomBoolean())), List.of()).routingTable();
    state = ClusterState.builder(state).routingTable(routingTable).build();
    assertEquals(routingTable.index("idx").shard(0).shard(0).state(), UNASSIGNED);
    assertNull(routingTable.index("idx").shard(0).shard(0).currentNodeId());
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
    allocation.debugDecision(true);
    // The filter still allows node2 ...
    Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(), state.getRoutingNodes().node("node2"), allocation);
    assertEquals(Type.YES, decision.type());
    assertEquals("node passes include/exclude/require filters", decision.getExplanation());
    ShardRouting primaryShard = routingTable.index("idx").shard(0).primaryShard();
    // ... but rejects node1 while the shard is still in its initial recovery.
    decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).primaryShard(), state.getRoutingNodes().node("node1"), allocation);
    assertEquals(Type.NO, decision.type());
    // Explanation wording depends on the recovery source (shrunken index vs. fresh index).
    if (primaryShard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
        assertEquals("initial allocation of the shrunken index is only allowed on nodes [_id:\"node2\"] that " + "hold a copy of every shard in the index", decision.getExplanation());
    } else {
        assertEquals("initial allocation of the index is only allowed on nodes [_id:\"node2\"]", decision.getExplanation());
    }
    state = service.reroute(state, "try allocate again", ActionListener.noop());
    routingTable = state.routingTable();
    assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node2");
    // Start the primary; the replica then initializes on the remaining node (node1).
    state = startShardsAndReroute(service, state, routingTable.index("idx").shard(0).shardsWithState(INITIALIZING));
    routingTable = state.routingTable();
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
    state = startShardsAndReroute(service, state, routingTable.index("idx").shard(0).replicaShardsWithState(INITIALIZING));
    routingTable = state.routingTable();
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).state(), STARTED);
    assertEquals(routingTable.index("idx").shard(0).replicaShards().get(0).currentNodeId(), "node1");
    // Remove node1, fail the primary, then bring node1 back: after the initial
    // allocation cycle the filter no longer applies, so the primary may now
    // recover on node1.
    DiscoveryNode node1 = state.nodes().resolveNode("node1");
    state = service.disassociateDeadNodes(ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).remove("node1")).build(), true, "test");
    state = service.applyFailedShards(state, List.of(new FailedShard(routingTable.index("idx").shard(0).primaryShard(), null, null, randomBoolean())), List.of());
    state = service.reroute(ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).add(node1)).build(), "test", ActionListener.noop());
    routingTable = state.routingTable();
    assertEquals(routingTable.index("idx").shard(0).primaryShard().state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).primaryShard().currentNodeId(), "node1");
    allocation = new RoutingAllocation(allocationDeciders, state, null, null, 0);
    allocation.debugDecision(true);
    // Both nodes now pass the filter.
    decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shard(0), state.getRoutingNodes().node("node2"), allocation);
    assertEquals(Type.YES, decision.type());
    assertEquals("node passes include/exclude/require filters", decision.getExplanation());
    decision = (Decision.Single) filterAllocationDecider.canAllocate(routingTable.index("idx").shard(0).shard(0), state.getRoutingNodes().node("node1"), allocation);
    assertEquals(Type.YES, decision.type());
    assertEquals("node passes include/exclude/require filters", decision.getExplanation());
}
304054.364100elasticsearch
/**
 * Verifies recovery after a failed rebalance: two shards with one replica each are
 * fully started on two nodes, a third node triggers a relocation, and the relocating
 * shard is failed on the new node. The cluster must end up re-attempting allocation
 * onto node3; the exact shard chosen depends on which allocator is in use.
 */
public void testRebalanceFailure() {
    AllocationService strategy = createAllocationService(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", 10).put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always").build());
    logger.info("Building initial routing table");
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(2).numberOfReplicas(1)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    logger.info("Adding two nodes and performing rerouting");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1")).build();
    clusterState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    logger.info("Start the shards (primaries)");
    ClusterState newState = startInitializingShardsAndReroute(strategy, clusterState);
    // Each step must produce a new (different) cluster state.
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.routingTable().index("test").size(), equalTo(2));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
    }
    logger.info("Start the shards (backups)");
    newState = startInitializingShardsAndReroute(strategy, clusterState);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.routingTable().index("test").size(), equalTo(2));
    for (int i = 0; i < clusterState.routingTable().index("test").size(); i++) {
        assertThat(clusterState.routingTable().index("test").shard(i).size(), equalTo(2));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().size(), equalTo(1));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
        assertThat(clusterState.routingTable().index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
    }
    logger.info("Adding third node and reroute");
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
    newState = strategy.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    RoutingNodes routingNodes = clusterState.getRoutingNodes();
    assertThat(clusterState.routingTable().index("test").size(), equalTo(2));
    // Rebalancing has begun: node1/node2 still account for two shards each
    // (STARTED or RELOCATING), and one copy is initializing on node3.
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED) + routingNodes.node("node1").numberOfShardsWithState(RELOCATING), equalTo(2));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED) + routingNodes.node("node2").numberOfShardsWithState(RELOCATING), equalTo(2));
    assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
    logger.info("Fail the shards on node 3");
    ShardRouting shardToFail = routingNodes.node("node3").iterator().next();
    newState = applyFailedShard(strategy, clusterState, shardToFail);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    routingNodes = clusterState.getRoutingNodes();
    assertThat(clusterState.routingTable().index("test").size(), equalTo(2));
    assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED) + routingNodes.node("node1").numberOfShardsWithState(RELOCATING), equalTo(2));
    assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED) + routingNodes.node("node2").numberOfShardsWithState(RELOCATING), equalTo(2));
    // Allocator-specific behavior after the failure:
    if (strategy.isBalancedShardsAllocator()) {
        // The balanced allocator immediately retries node3 with a different shard.
        assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
        assertThat(routingNodes.node("node3").iterator().next().shardId(), not(equalTo(shardToFail.shardId())));
    } else {
        // Other allocators leave node3 empty until an explicit reroute, after which
        // the same shard is retried on node3.
        assertFalse(routingNodes.node("node3").iterator().hasNext());
        clusterState = strategy.reroute(clusterState, "test", ActionListener.noop());
        routingNodes = clusterState.getRoutingNodes();
        assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
        assertThat(routingNodes.node("node3").iterator().next().shardId(), equalTo(shardToFail.shardId()));
    }
}
303861.836103elasticsearch
/**
 * Checks that when a primary fails, the active promotable replica on the node with the
 * highest version is the one promoted to primary, and repeats the check after failing
 * the newly promoted primary a second time.
 */
public void testReplicaOnNewestVersionIsPromoted() {
    AllocationService allocation = createAllocationService(Settings.builder().build());
    Metadata metadata = Metadata.builder().put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(3)).build();
    RoutingTable initialRoutingTable = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(metadata.index("test")).build();
    ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(initialRoutingTable).build();
    ShardId shardId = new ShardId(metadata.index("test").getIndex(), 0);
    // Start with a single old (5.x) node: only the primary can be allocated.
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1-5.x", Version.fromId(5060099)))).build();
    clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute", ActionListener.noop()).routingTable()).build();
    assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(1));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).size(), equalTo(3));
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(1));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).size(), equalTo(3));
    // Second old node takes one replica.
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2-5.x", Version.fromId(5060099)))).build();
    clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(1));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(1));
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(2));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).size(), equalTo(2));
    // Two more nodes with random (but compatible, newer than 5.x) versions take the
    // remaining replicas.
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3-old", VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), null), IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, null))).add(newNode("node4-old", VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), null), IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, null)))).build();
    clusterState = allocation.reroute(clusterState, "reroute", ActionListener.noop());
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(2));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), INITIALIZING).size(), equalTo(2));
    clusterState = startInitializingShardsAndReroute(allocation, clusterState);
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(4));
    assertThat(shardsWithState(clusterState.getRoutingNodes(), UNASSIGNED).size(), equalTo(0));
    ShardRouting startedReplica = clusterState.getRoutingNodes().activePromotableReplicaWithHighestVersion(shardId);
    logger.info("--> all shards allocated, replica that should be promoted: {}", startedReplica);
    ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
    ClusterState newState = applyFailedShard(allocation, clusterState, primaryShardToFail);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(3));
    ShardRouting newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard();
    assertThat(newPrimaryShard, not(equalTo(primaryShardToFail)));
    // The promoted primary is exactly the highest-version replica identified above.
    assertThat(newPrimaryShard.allocationId(), equalTo(startedReplica.allocationId()));
    Version replicaNodeVersion = clusterState.nodes().getDataNodes().get(startedReplica.currentNodeId()).getVersion();
    assertNotNull(replicaNodeVersion);
    logger.info("--> shard {} got assigned to node with version {}", startedReplica, replicaNodeVersion);
    for (DiscoveryNode discoveryNode : clusterState.nodes().getDataNodes().values()) {
        // Skip the node that held the failed primary, mirroring the second
        // verification loop below. (The previous literal check for "node1" matched no
        // node — ids here are "node1-5.x" etc. — so the skip was dead code.)
        if (primaryShardToFail.currentNodeId().equals(discoveryNode.getId())) {
            continue;
        }
        Version nodeVer = discoveryNode.getVersion();
        assertTrue("expected node [" + discoveryNode.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, replicaNodeVersion.onOrAfter(nodeVer));
    }
    // Fail the promoted primary as well and verify the next-highest-version replica
    // is promoted in turn.
    startedReplica = clusterState.getRoutingNodes().activePromotableReplicaWithHighestVersion(shardId);
    logger.info("--> failing primary shard a second time, should select: {}", startedReplica);
    ShardRouting secondPrimaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
    newState = applyFailedShard(allocation, clusterState, secondPrimaryShardToFail);
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(shardsWithState(clusterState.getRoutingNodes(), STARTED).size(), equalTo(2));
    newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard();
    assertThat(newPrimaryShard, not(equalTo(secondPrimaryShardToFail)));
    assertThat(newPrimaryShard.allocationId(), equalTo(startedReplica.allocationId()));
    replicaNodeVersion = clusterState.nodes().getDataNodes().get(startedReplica.currentNodeId()).getVersion();
    assertNotNull(replicaNodeVersion);
    logger.info("--> shard {} got assigned to node with version {}", startedReplica, replicaNodeVersion);
    for (DiscoveryNode discoveryNode : clusterState.nodes().getDataNodes().values()) {
        // Skip the nodes that held both failed primaries.
        if (primaryShardToFail.currentNodeId().equals(discoveryNode.getId()) || secondPrimaryShardToFail.currentNodeId().equals(discoveryNode.getId())) {
            continue;
        }
        Version nodeVer = discoveryNode.getVersion();
        assertTrue("expected node [" + discoveryNode.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, replicaNodeVersion.onOrAfter(nodeVer));
    }
}
302900.944116elasticsearch
/**
 * Exercises {@code IndexRoutingTable.readyForSearch} across shard-role/state
 * combinations for a two-shard index, once with and once without the
 * {@code index.fast_refresh} setting (supplied by the caller via {@code fastRefresh}).
 * With fast refresh, search readiness presumably follows the primaries; without it,
 * each shard needs at least one started search-capable copy — TODO confirm against
 * the readyForSearch implementation.
 */
private void innerReadyForSearch(boolean fastRefresh) {
    Index index = new Index(randomIdentifier(), UUIDs.randomBase64UUID());
    // Deep-stubbed cluster state so the index settings lookup returns the desired
    // fast_refresh value without building real metadata.
    ClusterState clusterState = mock(ClusterState.class, Mockito.RETURNS_DEEP_STUBS);
    when(clusterState.metadata().index(any(Index.class)).getSettings()).thenReturn(Settings.builder().put(INDEX_FAST_REFRESH_SETTING.getKey(), fastRefresh).build());
    ShardId p1 = new ShardId(index, 0);
    IndexShardRoutingTable shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.DEFAULT)));
    ShardId p2 = new ShardId(index, 1);
    IndexShardRoutingTable shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.DEFAULT)));
    // Case 1: started DEFAULT-role primaries only -> ready.
    IndexRoutingTable indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
    assertTrue(indexRoutingTable.readyForSearch(clusterState));
    // Case 2: started INDEX_ONLY primaries, no search copies -> ready only with fast refresh.
    shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY)));
    shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY)));
    indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
    if (fastRefresh) {
        assertTrue(indexRoutingTable.readyForSearch(clusterState));
    } else {
        assertFalse(indexRoutingTable.readyForSearch(clusterState));
    }
    // Case 3: unassigned INDEX_ONLY primaries -> never ready.
    shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY)));
    shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY)));
    indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
    assertFalse(indexRoutingTable.readyForSearch(clusterState));
    // Case 4: shard 0 has started search replicas but shard 1's search replicas are
    // unassigned -> ready only with fast refresh (primaries are started).
    shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY), getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY), getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)));
    shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY), getShard(p2, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY), getShard(p2, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY)));
    indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
    if (fastRefresh) {
        assertTrue(indexRoutingTable.readyForSearch(clusterState));
    } else {
        assertFalse(indexRoutingTable.readyForSearch(clusterState));
    }
    // Case 5: all search replicas started on both shards -> ready.
    shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY), getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY), getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)));
    shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY), getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY), getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)));
    indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
    assertTrue(indexRoutingTable.readyForSearch(clusterState));
    // Case 6: unassigned primaries but started search replicas -> ready only WITHOUT
    // fast refresh (fast refresh relies on the primaries).
    shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY), getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY), getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)));
    shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.UNASSIGNED, ShardRouting.Role.INDEX_ONLY), getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY), getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)));
    indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
    if (fastRefresh) {
        assertFalse(indexRoutingTable.readyForSearch(clusterState));
    } else {
        assertTrue(indexRoutingTable.readyForSearch(clusterState));
    }
    // Case 7: each shard has at least one started search replica (one of two
    // unassigned per shard) -> ready in both modes.
    shardTable1 = new IndexShardRoutingTable(p1, List.of(getShard(p1, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY), getShard(p1, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY), getShard(p1, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY)));
    shardTable2 = new IndexShardRoutingTable(p2, List.of(getShard(p2, true, ShardRoutingState.STARTED, ShardRouting.Role.INDEX_ONLY), getShard(p2, false, ShardRoutingState.UNASSIGNED, ShardRouting.Role.SEARCH_ONLY), getShard(p2, false, ShardRoutingState.STARTED, ShardRouting.Role.SEARCH_ONLY)));
    indexRoutingTable = new IndexRoutingTable(index, new IndexShardRoutingTable[] { shardTable1, shardTable2 });
    assertTrue(indexRoutingTable.readyForSearch(clusterState));
}
302609.58111elasticsearch
/**
 * Verifies that a {@link ReadOnlyEngine} opened over an {@link InternalEngine}'s
 * committed state keeps exposing that snapshot (checkpoints, seq-no stats, doc ids,
 * gets) even while the live engine continues to receive deletes, and that its view
 * also survives a separate engine recovering from the translog.
 */
public void testReadOnlyEngine() throws Exception {
    IOUtils.close(engine, store);
    Engine readOnlyEngine = null;
    final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
    try (Store store = createStore()) {
        EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
        int numDocs = scaledRandomIntBetween(10, 1000);
        final SeqNoStats lastSeqNoStats;
        final List<DocIdSeqNoAndSource> lastDocIds;
        try (InternalEngine engine = createEngine(config)) {
            Engine.Get get = null;
            // Index numDocs documents as replica ops, occasionally flushing and
            // advancing the global checkpoint, and remember a random doc to GET later.
            for (int i = 0; i < numDocs; i++) {
                ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
                engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
                if (get == null || rarely()) {
                    get = newGet(randomBoolean(), doc);
                }
                if (rarely()) {
                    engine.flush();
                }
                globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint()));
            }
            engine.syncTranslog();
            globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getPersistedLocalCheckpoint()));
            // Flush so the read-only engine sees a complete commit, then snapshot the
            // stats/doc ids it is expected to keep serving.
            engine.flush();
            readOnlyEngine = new ReadOnlyEngine(engine.engineConfig, engine.getSeqNoStats(globalCheckpoint.get()), engine.getTranslogStats(), false, Function.identity(), true, randomBoolean());
            lastSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get());
            lastDocIds = getDocIds(engine, true);
            assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint()));
            assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo()));
            assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds));
            // Keep mutating the live engine; the read-only view must not change.
            for (int i = 0; i < numDocs; i++) {
                if (randomBoolean()) {
                    String delId = Integer.toString(i);
                    engine.delete(new Engine.Delete(delId, newUid(delId), primaryTerm.get()));
                }
                if (rarely()) {
                    engine.flush();
                }
            }
            // With lazy soft-deletes enabled, opening the engine must not load them.
            try (ReadOnlyEngine readOnlyEngineWithLazySoftDeletes = new ReadOnlyEngine(engine.engineConfig, engine.getSeqNoStats(globalCheckpoint.get()), engine.getTranslogStats(), false, Function.identity(), true, true)) {
                EngineTestCase.checkNoSoftDeletesLoaded(readOnlyEngineWithLazySoftDeletes);
            }
            // External and internal searchers share the same underlying reader, and
            // the ES directory reader delegates its cache key to that reader.
            Engine.Searcher external = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.EXTERNAL);
            Engine.Searcher internal = readOnlyEngine.acquireSearcher("test", Engine.SearcherScope.INTERNAL);
            assertSame(external.getIndexReader(), internal.getIndexReader());
            assertThat(external.getIndexReader(), instanceOf(DirectoryReader.class));
            DirectoryReader dirReader = external.getDirectoryReader();
            ElasticsearchDirectoryReader esReader = getElasticsearchDirectoryReader(dirReader);
            IndexReader.CacheHelper helper = esReader.getReaderCacheHelper();
            assertNotNull(helper);
            assertEquals(helper.getKey(), dirReader.getReaderCacheHelper().getKey());
            IOUtils.close(external, internal);
            // Snapshot is still intact after the deletes above.
            assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint()));
            assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo()));
            assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds));
            MapperService mapperService = createMapperService();
            try (Engine.GetResult getResult = readOnlyEngine.get(get, mapperService.mappingLookup(), mapperService.documentParser(), randomSearcherWrapper())) {
                assertTrue(getResult.exists());
            }
        }
        // A fresh engine recovering from the translog must not disturb the
        // read-only engine's snapshot either.
        try (InternalEngine recoveringEngine = new InternalEngine(config)) {
            recoverFromTranslog(recoveringEngine, translogHandler, Long.MAX_VALUE);
            assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint()));
            assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo()));
            assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds));
        }
    } finally {
        // readOnlyEngine outlives the try-with-resources blocks above, so close it here.
        IOUtils.close(readOnlyEngine);
    }
}
302936.81394elasticsearch
/**
 * Tests fielddata frequency filtering: only terms whose document frequency falls inside
 * the configured [min, max] window (subject to the minSegmentSize threshold) should be
 * loaded into ordinals.
 *
 * Index layout (1000 docs, force-merged to a single segment):
 * - term "100" added every 100th doc to high_freq/low_freq/med_freq (10 docs)
 * - term "10"  added every 10th doc to high_freq/med_freq (100 docs)
 * - term "5"   added every 5th doc to high_freq only (200 docs)
 */
public void testFilterByFrequency() throws Exception {
    Random random = random();
    for (int i = 0; i < 1000; i++) {
        Document d = new Document();
        d.add(new StringField("id", "" + i, Field.Store.NO));
        if (i % 100 == 0) {
            d.add(new StringField("high_freq", "100", Field.Store.NO));
            d.add(new StringField("low_freq", "100", Field.Store.NO));
            d.add(new StringField("med_freq", "100", Field.Store.NO));
        }
        if (i % 10 == 0) {
            d.add(new StringField("high_freq", "10", Field.Store.NO));
            d.add(new StringField("med_freq", "10", Field.Store.NO));
        }
        if (i % 5 == 0) {
            d.add(new StringField("high_freq", "5", Field.Store.NO));
        }
        writer.addDocument(d);
    }
    // Force a single segment so term frequencies are deterministic per leaf.
    writer.forceMerge(1, true);
    List<LeafReaderContext> contexts = refreshReader();
    final MapperBuilderContext builderContext = MapperBuilderContext.root(false, false);
    // high_freq filtered to the lower end of the window: "5" is dropped, "10" and "100" remain.
    assertFilteredOrdinals(contexts, builderContext, "high_freq", 0, random.nextBoolean() ? 100 : 0.5d, 0, "10", "100");
    // high_freq filtered to the upper end of the window: only "5" remains.
    assertFilteredOrdinals(contexts, builderContext, "high_freq", random.nextBoolean() ? 101 : 101d / 200.0d, 201, 100, "5");
    // med_freq with minSegmentSize 101: "10" and "100" remain.
    assertFilteredOrdinals(contexts, builderContext, "med_freq", random.nextBoolean() ? 101 : 101d / 200.0d, Integer.MAX_VALUE, 101, "10", "100");
    // NOTE(review): this case is byte-identical to the previous one in the original source —
    // TODO confirm whether a different filter configuration (e.g. a different minSegmentSize)
    // was intended here.
    assertFilteredOrdinals(contexts, builderContext, "med_freq", random.nextBoolean() ? 101 : 101d / 200.0d, Integer.MAX_VALUE, 101, "10", "100");
}

/**
 * Clears fielddata caches, builds a frequency-filtered text fielddata instance for
 * {@code field} with the given filter bounds, and asserts that exactly
 * {@code expectedTerms} (in ordinal order) survive the filter on every leaf.
 */
private void assertFilteredOrdinals(
    List<LeafReaderContext> contexts,
    MapperBuilderContext builderContext,
    String field,
    double minFreq,
    double maxFreq,
    int minSegmentSize,
    String... expectedTerms
) throws Exception {
    indexService.clearCaches(false, true);
    MappedFieldType ft = new TextFieldMapper.Builder(
        field,
        createDefaultIndexAnalyzers(),
        indexService.getIndexSettings().getMode().isSyntheticSourceEnabled()
    ).fielddata(true).fielddataFrequencyFilter(minFreq, maxFreq, minSegmentSize).build(builderContext).fieldType();
    IndexOrdinalsFieldData fieldData = searchExecutionContext.getForField(ft, MappedFieldType.FielddataOperation.SEARCH);
    for (LeafReaderContext context : contexts) {
        LeafOrdinalsFieldData loadDirect = fieldData.loadDirect(context);
        SortedSetDocValues bytesValues = loadDirect.getOrdinalsValues();
        assertThat((long) expectedTerms.length, equalTo(bytesValues.getValueCount()));
        for (int ord = 0; ord < expectedTerms.length; ord++) {
            assertThat(bytesValues.lookupOrd(ord).utf8ToString(), equalTo(expectedTerms[ord]));
        }
    }
}
304614.141104elasticsearch
/**
 * Verifies that nested sub-aggregations under a filter aggregation collect all child
 * documents: three root documents, each with two nested "nested_field" children
 * (key1 -> aN, key2 -> bN), indexed into separate segments via per-root commits.
 */
public void testPreGetChildLeafCollectors() throws IOException {
    try (Directory directory = newDirectory()) {
        try (RandomIndexWriter iw = newRandomIndexWriterWithLogDocMergePolicy(directory)) {
            // One commit per root document so each Lucene block lands in its own segment.
            addRootWithNestedKeyValues(iw, "1", "a1", "b1");
            addRootWithNestedKeyValues(iw, "2", "a2", "b2");
            addRootWithNestedKeyValues(iw, "3", "a3", "b3");
        }
        try (DirectoryReader indexReader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) {
            TermsAggregationBuilder valueBuilder = new TermsAggregationBuilder("value").userValueTypeHint(ValueType.STRING).field("value");
            TermsAggregationBuilder keyBuilder = new TermsAggregationBuilder("key").userValueTypeHint(ValueType.STRING).field("key");
            keyBuilder.subAggregation(valueBuilder);
            NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, "nested_field");
            nestedBuilder.subAggregation(keyBuilder);
            FilterAggregationBuilder filterAggregationBuilder = new FilterAggregationBuilder("filterAgg", new MatchAllQueryBuilder());
            filterAggregationBuilder.subAggregation(nestedBuilder);
            MappedFieldType fieldType1 = new KeywordFieldMapper.KeywordFieldType("key");
            MappedFieldType fieldType2 = new KeywordFieldMapper.KeywordFieldType("value");
            Filter filter = searchAndReduce(indexReader, new AggTestConfig(filterAggregationBuilder, fieldType1, fieldType2).withQuery(Queries.newNonNestedFilter(IndexVersion.current())));
            assertEquals("filterAgg", filter.getName());
            // 3 root documents match the non-nested filter.
            assertEquals(3L, filter.getDocCount());
            InternalNested nested = filter.getAggregations().get(NESTED_AGG);
            // 2 nested children per root document.
            assertEquals(6L, nested.getDocCount());
            StringTerms keyAgg = nested.getAggregations().get("key");
            assertEquals(2, keyAgg.getBuckets().size());
            Terms.Bucket key1 = keyAgg.getBuckets().get(0);
            assertEquals("key1", key1.getKey());
            StringTerms valueAgg = key1.getAggregations().get("value");
            assertEquals(3, valueAgg.getBuckets().size());
            assertEquals("a1", valueAgg.getBuckets().get(0).getKey());
            assertEquals("a2", valueAgg.getBuckets().get(1).getKey());
            assertEquals("a3", valueAgg.getBuckets().get(2).getKey());
            Terms.Bucket key2 = keyAgg.getBuckets().get(1);
            assertEquals("key2", key2.getKey());
            valueAgg = key2.getAggregations().get("value");
            assertEquals(3, valueAgg.getBuckets().size());
            assertEquals("b1", valueAgg.getBuckets().get(0).getKey());
            assertEquals("b2", valueAgg.getBuckets().get(1).getKey());
            assertEquals("b3", valueAgg.getBuckets().get(2).getKey());
        }
    }
}

/**
 * Indexes one root document (id stored, path "_doc", seq-no fields) preceded by two
 * nested children carrying (key1, key1Value) and (key2, key2Value), then commits.
 */
private void addRootWithNestedKeyValues(RandomIndexWriter iw, String id, String key1Value, String key2Value) throws IOException {
    List<Iterable<IndexableField>> documents = new ArrayList<>();
    for (String[] keyValue : new String[][] { { "key1", key1Value }, { "key2", key2Value } }) {
        LuceneDocument child = new LuceneDocument();
        child.add(new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.NO));
        child.add(new StringField(NestedPathFieldMapper.NAME, "nested_field", Field.Store.NO));
        child.add(new SortedDocValuesField("key", new BytesRef(keyValue[0])));
        child.add(new SortedDocValuesField("value", new BytesRef(keyValue[1])));
        documents.add(child);
    }
    // The root (parent) document must come last within the Lucene block.
    LuceneDocument root = new LuceneDocument();
    root.add(new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.YES));
    root.add(new StringField(NestedPathFieldMapper.NAME, "_doc", Field.Store.NO));
    sequenceIDFields.addFields(root);
    documents.add(root);
    iw.addDocuments(documents);
    iw.commit();
}
303886.343104elasticsearch
/**
 * Verifies that the query phase enables the points-based numeric sort optimization for
 * long and date sort fields (single and mixed orders), that optimized searches still
 * return correctly sorted results, honor search_after, and report accurate total hits
 * when size == 0.
 */
public void testNumericSortOptimization() throws Exception {
    final String fieldNameLong = "long-field";
    final String fieldNameDate = "date-field";
    MappedFieldType fieldTypeLong = new NumberFieldMapper.NumberFieldType(fieldNameLong, NumberFieldMapper.NumberType.LONG);
    MappedFieldType fieldTypeDate = new DateFieldMapper.DateFieldType(fieldNameDate);
    SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class);
    when(searchExecutionContext.getFieldType(fieldNameLong)).thenReturn(fieldTypeLong);
    when(searchExecutionContext.getFieldType(fieldNameDate)).thenReturn(fieldTypeDate);
    // Periodic flushes every 3500 docs produce multiple segments for the optimization to work across.
    final int numDocs = atLeast(3500 * 2);
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null));
    long startLongValue = randomLongBetween(-10000000L, 10000000L);
    long longValue = startLongValue;
    long dateValue = randomLongBetween(0, 3000000000000L);
    for (int i = 1; i <= numDocs; ++i) {
        Document doc = new Document();
        // Both a point and a doc-values field are required for the sort optimization.
        doc.add(new LongPoint(fieldNameLong, longValue));
        doc.add(new NumericDocValuesField(fieldNameLong, longValue));
        doc.add(new LongPoint(fieldNameDate, dateValue));
        doc.add(new NumericDocValuesField(fieldNameDate, dateValue));
        writer.addDocument(doc);
        longValue++;
        dateValue++;
        if (i % 3500 == 0) {
            writer.flush();
        }
    }
    writer.close();
    reader = DirectoryReader.open(dir);
    final SortField sortFieldLong = new SortField(fieldNameLong, SortField.Type.LONG);
    final SortField sortFieldDate = new SortField(fieldNameDate, SortField.Type.LONG);
    sortFieldLong.setMissingValue(Long.MAX_VALUE);
    sortFieldDate.setMissingValue(Long.MAX_VALUE);
    final Sort sortLong = new Sort(sortFieldLong);
    final Sort sortDate = new Sort(sortFieldDate);
    final Sort sortLongDate = new Sort(sortFieldLong, sortFieldDate);
    final Sort sortDateLong = new Sort(sortFieldDate, sortFieldLong);
    final DocValueFormat dvFormatDate = fieldTypeDate.docValueFormat(null, null);
    final SortAndFormats formatsLong = new SortAndFormats(sortLong, new DocValueFormat[] { DocValueFormat.RAW });
    final SortAndFormats formatsDate = new SortAndFormats(sortDate, new DocValueFormat[] { dvFormatDate });
    final SortAndFormats formatsLongDate = new SortAndFormats(sortLongDate, new DocValueFormat[] { DocValueFormat.RAW, dvFormatDate });
    final SortAndFormats formatsDateLong = new SortAndFormats(sortDateLong, new DocValueFormat[] { dvFormatDate, DocValueFormat.RAW });
    Query q = LongPoint.newRangeQuery(fieldNameLong, startLongValue, startLongValue + numDocs);
    // Single long sort.
    assertOptimizedSort(q, formatsLong, numDocs, false);
    // search_after on the long sort: first hit must be strictly after the given value.
    try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) {
        int afterDoc = (int) randomLongBetween(0, 30);
        long afterValue = startLongValue + afterDoc;
        FieldDoc after = new FieldDoc(afterDoc, Float.NaN, new Long[] { afterValue });
        searchContext.searchAfter(after);
        searchContext.sort(formatsLong);
        searchContext.trackTotalHitsUpTo(10);
        searchContext.setSize(10);
        QueryPhase.addCollectorsAndSearch(searchContext);
        assertTrue(searchContext.sort().sort.getSort()[0].getOptimizeSortWithPoints());
        final TopDocs topDocs = searchContext.queryResult().topDocs().topDocs;
        long firstResult = (long) ((FieldDoc) topDocs.scoreDocs[0]).fields[0];
        assertThat(firstResult, greaterThan(afterValue));
        assertSortResults(topDocs, numDocs, false);
    }
    // Long-then-date, single date, and date-then-long sorts must all be optimized too.
    assertOptimizedSort(q, formatsLongDate, numDocs, true);
    assertOptimizedSort(q, formatsDate, numDocs, false);
    assertOptimizedSort(q, formatsDateLong, numDocs, true);
    // size == 0: no hits returned, but total hit count must still be exact.
    try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) {
        searchContext.sort(formatsLong);
        searchContext.trackTotalHitsUpTo(10);
        searchContext.from(5);
        searchContext.setSize(0);
        QueryPhase.addCollectorsAndSearch(searchContext);
        assertTrue(searchContext.sort().sort.getSort()[0].getOptimizeSortWithPoints());
        assertThat(searchContext.queryResult().topDocs().topDocs.scoreDocs, arrayWithSize(0));
        assertThat(searchContext.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs));
        assertThat(searchContext.queryResult().topDocs().topDocs.totalHits.relation, equalTo(TotalHits.Relation.EQUAL_TO));
    }
    // size == 0 without a track-total-hits bound: must not throw.
    try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) {
        searchContext.sort(formatsLong);
        searchContext.setSize(0);
        QueryPhase.addCollectorsAndSearch(searchContext);
    }
}

/**
 * Runs the query phase over {@code q} with the given sort and asserts that the
 * points-based sort optimization was applied and the top docs are correctly sorted.
 */
private void assertOptimizedSort(Query q, SortAndFormats formats, int numDocs, boolean isDoubleSort) throws Exception {
    try (TestSearchContext searchContext = createContext(newContextSearcher(reader), q)) {
        searchContext.sort(formats);
        searchContext.trackTotalHitsUpTo(10);
        searchContext.setSize(10);
        QueryPhase.addCollectorsAndSearch(searchContext);
        assertTrue(searchContext.sort().sort.getSort()[0].getOptimizeSortWithPoints());
        assertSortResults(searchContext.queryResult().topDocs().topDocs, numDocs, isDoubleSort);
    }
}
303287.681112elasticsearch
/**
 * Serializes a randomized ReactiveReason to JSON and verifies every rendered field,
 * including shard id truncation/ordering and the per-shard allocate/remain node decisions.
 */
public void testXContent() throws IOException {
    String expectedReason = randomAlphaOfLength(10);
    long expectedUnassigned = randomNonNegativeLong();
    long expectedAssigned = randomNonNegativeLong();
    String indexUUID = UUIDs.randomBase64UUID();
    String indexName = randomAlphaOfLength(10);
    SortedSet<ShardId> unassignedShards = new TreeSet<>(randomUnique(() -> new ShardId(indexName, indexUUID, randomInt(1000)), 600));
    SortedSet<ShardId> assignedShards = new TreeSet<>(randomUnique(() -> new ShardId(indexName, indexUUID, randomInt(1000)), 600));
    DiscoveryNode node1 = DiscoveryNodeUtils.builder("node1").roles(emptySet()).build();
    DiscoveryNode node2 = DiscoveryNodeUtils.builder("node2").roles(emptySet()).build();
    // One shard gets a "can allocate" decision per node (node1: NO, node2: YES+NO multi).
    NodeDecision node1Allocate = new NodeDecision(node1, Decision.single(Decision.Type.NO, "no_label", "No space to allocate"));
    NodeDecision node2Allocate = new NodeDecision(
        node2,
        new Decision.Multi().add(Decision.single(Decision.Type.YES, "data_tier", "Enough disk on this node for the shard to remain"))
            .add(Decision.single(Decision.Type.NO, "shards_limit", "Disallowed because of shard limits"))
    );
    Map<ShardId, NodeDecisions> unassignedDecisions = Map.of(unassignedShards.first(), new NodeDecisions(List.of(node1Allocate, node2Allocate), null));
    // One assigned shard gets only a "can remain" decision with a three-part multi decision.
    NodeDecision remainDecisionNode1 = new NodeDecision(
        node1,
        new Decision.Multi().add(Decision.single(Decision.Type.THROTTLE, "multi_throttle", "is not active yet"))
            .add(new Decision.Single(Decision.Type.NO, "multi_no", "No multi decision"))
            .add(new Decision.Single(Decision.Type.YES, "multi_yes", "Yes multi decision"))
    );
    Map<ShardId, NodeDecisions> assignedDecisions = Map.of(assignedShards.first(), new NodeDecisions(List.of(), remainDecisionNode1));
    var reactiveReason = new ReactiveStorageDeciderService.ReactiveReason(
        expectedReason,
        expectedUnassigned,
        unassignedShards,
        expectedAssigned,
        assignedShards,
        unassignedDecisions,
        assignedDecisions
    );
    try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(reactiveReason.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)))) {
        Map<String, Object> parsed = parser.map();
        assertEquals(expectedReason, parsed.get("reason"));
        assertEquals(expectedUnassigned, parsed.get("unassigned"));
        assertEquals(expectedAssigned, parsed.get("assigned"));
        // Shard id lists are truncated to MAX_AMOUNT_OF_SHARDS and rendered in sorted order.
        List<String> renderedUnassigned = (List<String>) parsed.get("unassigned_shards");
        assertEquals(unassignedShards.stream().map(ShardId::toString).limit(ReactiveStorageDeciderService.ReactiveReason.MAX_AMOUNT_OF_SHARDS).toList(), renderedUnassigned);
        assertSorted(renderedUnassigned.stream().map(ShardId::fromString).toList());
        assertEquals(unassignedShards.size(), parsed.get("unassigned_shards_count"));
        List<String> renderedAssigned = (List<String>) parsed.get("assigned_shards");
        assertEquals(assignedShards.stream().map(ShardId::toString).limit(ReactiveStorageDeciderService.ReactiveReason.MAX_AMOUNT_OF_SHARDS).toList(), renderedAssigned);
        assertSorted(renderedAssigned.stream().map(ShardId::fromString).toList());
        assertEquals(assignedShards.size(), parsed.get("assigned_shards_count"));
        // The unassigned shard must carry the two allocate decisions and no remain decision.
        Map<String, Object> unassignedEntry = (Map<String, Object>) ((Map<String, Object>) parsed.get("unassigned_node_decisions")).get(unassignedShards.first().toString());
        List<Map<String, Object>> allocateDecisions = (List<Map<String, Object>>) unassignedEntry.get("can_allocate_decisions");
        assertEquals(2, allocateDecisions.size());
        assertEquals("node1", allocateDecisions.get(0).get("node_id"));
        assertEquals(List.of(Map.of("decision", "NO", "decider", "no_label", "explanation", "No space to allocate")), allocateDecisions.get(0).get("deciders"));
        assertEquals("node2", allocateDecisions.get(1).get("node_id"));
        assertEquals(
            List.of(
                Map.of("decision", "YES", "decider", "data_tier", "explanation", "Enough disk on this node for the shard to remain"),
                Map.of("decision", "NO", "decider", "shards_limit", "explanation", "Disallowed because of shard limits")
            ),
            allocateDecisions.get(1).get("deciders")
        );
        assertFalse(unassignedEntry.containsKey("can_remain_decision"));
        // The assigned shard must carry the remain decision and an empty allocate list.
        Map<String, Object> assignedEntry = (Map<String, Object>) ((Map<String, Object>) parsed.get("assigned_node_decisions")).get(assignedShards.first().toString());
        var renderedRemain = (Map<String, Object>) assignedEntry.get("can_remain_decision");
        assertEquals("node1", renderedRemain.get("node_id"));
        assertEquals(
            List.of(
                Map.of("decision", "THROTTLE", "decider", "multi_throttle", "explanation", "is not active yet"),
                Map.of("decision", "NO", "decider", "multi_no", "explanation", "No multi decision"),
                Map.of("decision", "YES", "decider", "multi_yes", "explanation", "Yes multi decision")
            ),
            renderedRemain.get("deciders")
        );
        assertEquals(List.of(), assignedEntry.get("can_allocate_decisions"));
    }
}
30815.987167elasticsearch
/**
 * Creates a {@link ShardFollowNodeTask} wired to an in-memory fake leader: shard-changes
 * requests are answered from {@code testRun.responses}, bulk requests are recorded and
 * acknowledged via a {@link LocalCheckpointTracker}. On completion/failure the fake's
 * thread pool is shut down and the received operations are verified against the test run.
 *
 * @param concurrency max concurrent read and write requests for the follow task
 * @param testRun     scripted responses and expectations driving the fake leader
 */
private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testRun) {
    AtomicBoolean stopped = new AtomicBoolean(false);
    ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), new ShardId("leader_index", "", 0), testRun.maxOperationCount, testRun.maxOperationCount, concurrency, concurrency, TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, 10240, new ByteSizeValue(512, ByteSizeUnit.MB), TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap());
    ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName());
    // Schedules retries on the generic pool unless the task has been stopped.
    BiConsumer<TimeValue, Runnable> scheduler = (delay, task) -> {
        assert delay.millis() < 100 : "The delay should be kept to a minimum, so that this test does not take too long to run";
        if (stopped.get() == false) {
            threadPool.schedule(task, delay, threadPool.generic());
        }
    };
    List<Translog.Operation> receivedOperations = Collections.synchronizedList(new ArrayList<>());
    LocalCheckpointTracker tracker = new LocalCheckpointTracker(testRun.startSeqNo - 1, testRun.startSeqNo - 1);
    return new ShardFollowNodeTask(1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) {

        private volatile long mappingVersion = 0L;

        private volatile long settingsVersion = 0L;

        private volatile long aliasesVersion = 0L;

        // Tracks, per "from" seqno, which scripted response should be served next.
        private final Map<Long, Integer> fromToSlot = new HashMap<>();

        @Override
        protected void innerUpdateMapping(long minRequiredMappingVersion, LongConsumer handler, Consumer<Exception> errorHandler) {
            handler.accept(mappingVersion);
        }

        @Override
        protected void innerUpdateSettings(LongConsumer handler, Consumer<Exception> errorHandler) {
            handler.accept(settingsVersion);
        }

        @Override
        protected void innerUpdateAliases(LongConsumer handler, Consumer<Exception> errorHandler) {
            handler.accept(aliasesVersion);
        }

        @Override
        protected void innerSendBulkShardOperationsRequest(String followerHistoryUUID, List<Translog.Operation> operations, long maxSeqNoOfUpdates, Consumer<BulkShardOperationsResponse> handler, Consumer<Exception> errorHandler) {
            // Record everything the follower writes and acknowledge with the tracker's state.
            for (Translog.Operation op : operations) {
                tracker.markSeqNoAsProcessed(op.seqNo());
            }
            receivedOperations.addAll(operations);
            final BulkShardOperationsResponse response = new BulkShardOperationsResponse();
            response.setGlobalCheckpoint(tracker.getProcessedCheckpoint());
            response.setMaxSeqNo(tracker.getMaxSeqNo());
            threadPool.generic().execute(() -> handler.accept(response));
        }

        @Override
        protected void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer<ShardChangesAction.Response> handler, Consumer<Exception> errorHandler) {
            Runnable task = () -> {
                List<TestResponse> items = testRun.responses.get(from);
                if (items != null) {
                    final TestResponse testResponse;
                    // Serve the scripted responses for this "from" in order, one per request.
                    // (Simplified from the original null-check/getOrDefault/put dance, which
                    // was equivalent but wrote a redundant initial slot entry.)
                    synchronized (fromToSlot) {
                        int slot = fromToSlot.getOrDefault(from, 0);
                        testResponse = items.get(slot);
                        fromToSlot.put(from, slot + 1);
                    }
                    mappingVersion = testResponse.mappingVersion;
                    settingsVersion = testResponse.settingsVersion;
                    if (testResponse.exception != null) {
                        errorHandler.accept(testResponse.exception);
                    } else {
                        handler.accept(testResponse.response);
                    }
                } else {
                    // No scripted response: the follower has caught up; answer with an empty batch.
                    assert from >= testRun.finalExpectedGlobalCheckpoint;
                    final long globalCheckpoint = tracker.getProcessedCheckpoint();
                    final long maxSeqNo = tracker.getMaxSeqNo();
                    handler.accept(new ShardChangesAction.Response(0L, 0L, 0L, globalCheckpoint, maxSeqNo, randomNonNegativeLong(), new Translog.Operation[0], 1L));
                }
            };
            threadPool.generic().execute(task);
        }

        @Override
        protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) {
            // Retention lease renewal is irrelevant for this fake; return an already-cancelled handle.
            return new Scheduler.Cancellable() {

                @Override
                public boolean cancel() {
                    return true;
                }

                @Override
                public boolean isCancelled() {
                    return true;
                }
            };
        }

        @Override
        protected boolean isStopped() {
            return stopped.get();
        }

        @Override
        public void markAsCompleted() {
            stopped.set(true);
            tearDown();
        }

        @Override
        public void markAsFailed(Exception e) {
            stopped.set(true);
            tearDown();
        }

        /** Shuts the fake's thread pool down and asserts all expected operations were received exactly once. */
        private void tearDown() {
            threadPool.shutdown();
            List<Translog.Operation> expectedOperations = testRun.responses.values().stream().flatMap(List::stream).map(testResponse -> testResponse.response).filter(Objects::nonNull).flatMap(response -> Arrays.stream(response.getOperations())).sorted(Comparator.comparingLong(Translog.Operation::seqNo)).collect(Collectors.toList());
            assertThat(receivedOperations.size(), equalTo(expectedOperations.size()));
            receivedOperations.sort(Comparator.comparingLong(Translog.Operation::seqNo));
            for (int i = 0; i < receivedOperations.size(); i++) {
                Translog.Operation actual = receivedOperations.get(i);
                Translog.Operation expected = expectedOperations.get(i);
                assertThat(actual, equalTo(expected));
            }
        }
    };
}
302952.341112elasticsearch
/**
 * Exercises {@code ClusterStateWaitUntilThresholdStep} wrapping a {@code WaitForIndexingCompleteStep}
 * across four scenarios:
 * 1) indexing complete, threshold (480h) not breached  -> condition met, no info context
 * 2) indexing incomplete, threshold (48h) not breached -> condition unmet, info explains the wait
 * 3) indexing complete, threshold (1s) breached        -> condition met, next key is the wrapped step's own next key
 * 4) indexing incomplete, threshold (1h) breached      -> execution abandoned: "met" with a message, next key is the fallback
 */
public void testIsConditionMetForUnderlyingStep() {
    {
        // Scenario 1: leader reports indexing complete; step_time is "now", so the 480h threshold is untouched.
        IndexMetadata followerIndex = IndexMetadata.builder("follower-index")
            .settings(
                settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")
                    .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "480h")
            )
            .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(System.currentTimeMillis())))
            .putCustom(CCR_METADATA_KEY, Collections.emptyMap())
            .numberOfShards(1)
            .numberOfReplicas(0)
            .build();
        ClusterState state = ClusterState.builder(new ClusterName("cluster"))
            .metadata(Metadata.builder().put(followerIndex, true).build())
            .build();
        WaitForIndexingCompleteStep wrappedStep = new WaitForIndexingCompleteStep(randomStepKey(), randomStepKey());
        ClusterStateWaitUntilThresholdStep thresholdStep = new ClusterStateWaitUntilThresholdStep(wrappedStep, randomStepKey());
        ClusterStateWaitStep.Result outcome = thresholdStep.isConditionMet(followerIndex.getIndex(), state);
        assertThat(outcome.isComplete(), is(true));
        assertThat(outcome.getInfomationContext(), nullValue());
    }
    {
        // Scenario 2: indexing not complete and the 48h threshold has not passed -> keep waiting, with an explanation.
        IndexMetadata followerIndex = IndexMetadata.builder("follower-index")
            .settings(
                settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "false")
                    .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "48h")
            )
            .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(System.currentTimeMillis())))
            .putCustom(CCR_METADATA_KEY, Collections.emptyMap())
            .numberOfShards(1)
            .numberOfReplicas(0)
            .build();
        ClusterState state = ClusterState.builder(new ClusterName("cluster"))
            .metadata(Metadata.builder().put(followerIndex, true).build())
            .build();
        WaitForIndexingCompleteStep wrappedStep = new WaitForIndexingCompleteStep(randomStepKey(), randomStepKey());
        ClusterStateWaitUntilThresholdStep thresholdStep = new ClusterStateWaitUntilThresholdStep(wrappedStep, randomStepKey());
        ClusterStateWaitStep.Result outcome = thresholdStep.isConditionMet(followerIndex.getIndex(), state);
        assertThat(outcome.isComplete(), is(false));
        assertThat(outcome.getInfomationContext(), notNullValue());
        WaitForIndexingCompleteStep.IndexingNotCompleteInfo waitInfo =
            (WaitForIndexingCompleteStep.IndexingNotCompleteInfo) outcome.getInfomationContext();
        assertThat(
            waitInfo.getMessage(),
            equalTo(
                "waiting for the [index.lifecycle.indexing_complete] setting to be set to "
                    + "true on the leader index, it is currently [false]"
            )
        );
    }
    {
        // Scenario 3: threshold (1s, step_time=1234) is long breached, but the wrapped condition is already
        // satisfied -> the step completes normally and does NOT divert to the fallback key.
        IndexMetadata followerIndex = IndexMetadata.builder("follower-index")
            .settings(
                settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "true")
                    .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "1s")
            )
            .putCustom(CCR_METADATA_KEY, Collections.emptyMap())
            .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(1234L)))
            .numberOfShards(1)
            .numberOfReplicas(0)
            .build();
        ClusterState state = ClusterState.builder(new ClusterName("cluster"))
            .metadata(Metadata.builder().put(followerIndex, true).build())
            .build();
        WaitForIndexingCompleteStep wrappedStep = new WaitForIndexingCompleteStep(randomStepKey(), randomStepKey());
        StepKey nextKeyOnThresholdBreach = randomStepKey();
        ClusterStateWaitUntilThresholdStep thresholdStep = new ClusterStateWaitUntilThresholdStep(wrappedStep, nextKeyOnThresholdBreach);
        ClusterStateWaitStep.Result outcome = thresholdStep.isConditionMet(followerIndex.getIndex(), state);
        assertThat(outcome.isComplete(), is(true));
        assertThat(outcome.getInfomationContext(), nullValue());
        assertThat(thresholdStep.getNextStepKey(), is(not(nextKeyOnThresholdBreach)));
        assertThat(thresholdStep.getNextStepKey(), is(wrappedStep.getNextStepKey()));
    }
    {
        // Scenario 4: condition still unmet and the 1h threshold (step_time=1234) is breached -> the step
        // abandons waiting, reports "complete" with an explanatory message, and redirects to the fallback key.
        IndexMetadata followerIndex = IndexMetadata.builder("follower-index")
            .settings(
                settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE, "false")
                    .put(LifecycleSettings.LIFECYCLE_STEP_WAIT_TIME_THRESHOLD, "1h")
            )
            .putCustom(CCR_METADATA_KEY, Collections.emptyMap())
            .putCustom(ILM_CUSTOM_METADATA_KEY, Map.of("step_time", String.valueOf(1234L)))
            .numberOfShards(1)
            .numberOfReplicas(0)
            .build();
        ClusterState state = ClusterState.builder(new ClusterName("cluster"))
            .metadata(Metadata.builder().put(followerIndex, true).build())
            .build();
        StepKey currentStepKey = randomStepKey();
        WaitForIndexingCompleteStep wrappedStep = new WaitForIndexingCompleteStep(currentStepKey, randomStepKey());
        StepKey nextKeyOnThresholdBreach = randomStepKey();
        ClusterStateWaitUntilThresholdStep thresholdStep = new ClusterStateWaitUntilThresholdStep(wrappedStep, nextKeyOnThresholdBreach);
        ClusterStateWaitStep.Result outcome = thresholdStep.isConditionMet(followerIndex.getIndex(), state);
        assertThat(outcome.isComplete(), is(true));
        assertThat(outcome.getInfomationContext(), notNullValue());
        SingleMessageFieldInfo abandonInfo = (SingleMessageFieldInfo) outcome.getInfomationContext();
        assertThat(
            abandonInfo.getMessage(),
            equalTo(
                "["
                    + currentStepKey.name()
                    + "] lifecycle step, as part of ["
                    + currentStepKey.action()
                    + "] action, for index [follower-index] executed for more than [1h]. Abandoning execution and moving to the next "
                    + "fallback step ["
                    + nextKeyOnThresholdBreach
                    + "]"
            )
        );
        assertThat(thresholdStep.getNextStepKey(), is(nextKeyOnThresholdBreach));
    }
}
302559.617117elasticsearch
/**
 * Verifies document level security (DLS): for each distinct value of the keyword field
 * {@code field}, wrapping the reader with a role query that term-filters on that value
 * must expose exactly the live documents carrying it (hit count and reader numDocs both
 * equal the tracked per-value count; every 11th document is deleted before verification).
 */
public void testDLS() throws Exception {
    ShardId shardId = new ShardId("_index", "_na_", 0);
    MappingLookup mappingLookup = createMappingLookup(List.of(new KeywordFieldType("field")));
    ScriptService scriptService = mock(ScriptService.class);
    final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext);
    final Authentication authentication = AuthenticationTestHelper.builder().build();
    new AuthenticationContextSerializer().writeToContext(authentication, threadContext);
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY);
    Client client = mock(Client.class);
    when(client.settings()).thenReturn(Settings.EMPTY);
    final long nowInMillis = randomNonNegativeLong();
    SearchExecutionContext realSearchExecutionContext = new SearchExecutionContext(shardId.id(), 0, indexSettings, null, null, null, mappingLookup, null, null, parserConfig(), writableRegistry(), client, null, () -> nowInMillis, null, null, () -> true, null, emptyMap(), MapperMetrics.NOOP);
    SearchExecutionContext searchExecutionContext = spy(realSearchExecutionContext);
    DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY, Executors.newSingleThreadExecutor());
    final MockLicenseState licenseState = mock(MockLicenseState.class);
    when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true);
    Directory directory = newDirectory();
    // NoMergePolicy keeps deleted docs visible as deletions rather than merging them away.
    IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE));
    int numValues = scaledRandomIntBetween(2, 16);
    String[] values = new String[numValues];
    for (int i = 0; i < numValues; i++) {
        values[i] = "value" + i;
    }
    // Expected live-document count per value; kept in sync with the indexing loop below.
    int[] valuesHitCount = new int[numValues];
    int numDocs = scaledRandomIntBetween(32, 128);
    int commitAfter = scaledRandomIntBetween(1, numDocs);
    logger.info("Going to index [{}] documents with [{}] unique values and commit after [{}] documents have been indexed", numDocs, numValues, commitAfter);
    for (int doc = 1; doc <= numDocs; doc++) {
        // NOTE(review): `(numValues - 1) % doc` skews most docs toward the last value (it equals
        // numValues - 1 whenever doc > numValues - 1). The test stays self-consistent because
        // valuesHitCount is tracked with the same index, but a round-robin `doc % numValues` was
        // probably intended — confirm against the upstream test.
        int valueIndex = (numValues - 1) % doc;
        Document document = new Document();
        String id = String.valueOf(doc);
        document.add(new StringField("id", id, Field.Store.NO));
        String value = values[valueIndex];
        document.add(new StringField("field", value, Field.Store.NO));
        iw.addDocument(document);
        if (doc % 11 == 0) {
            // Delete every 11th doc; it must not be counted as an expected hit.
            iw.deleteDocuments(new Term("id", id));
        } else {
            // Fixed: was `commitAfter % commitAfter == 0`, a tautology that committed after every
            // document and ignored the computed interval (contradicting the log message above).
            if (doc % commitAfter == 0) {
                iw.commit();
            }
            valuesHitCount[valueIndex]++;
        }
    }
    iw.close();
    StringBuilder valueToHitCountOutput = new StringBuilder();
    for (int i = 0; i < numValues; i++) {
        valueToHitCountOutput.append(values[i]).append('\t').append(valuesHitCount[i]).append('\n');
    }
    logger.info("Value count matrix:\n{}", valueToHitCountOutput);
    DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
    for (int i = 0; i < numValues; i++) {
        // Role query restricting visibility to documents whose `field` equals values[i].
        String termQuery = "{\"term\": {\"field\": \"" + values[i] + "\"} }";
        IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.filteredBy(singleton(new BytesArray(termQuery))));
        SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper(s -> searchExecutionContext, bitsetCache, securityContext, licenseState, scriptService) {

            @Override
            protected IndicesAccessControl getIndicesAccessControl() {
                return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl));
            }
        };
        ParsedQuery parsedQuery = new ParsedQuery(new TermQuery(new Term("field", values[i])));
        // NOTE(review): this stubs toQuery for a TermsQueryBuilder while the role query JSON is a
        // `term` query — verify the stub actually matches; the spy falls through to the real
        // implementation otherwise.
        when(searchExecutionContext.toQuery(new TermsQueryBuilder("field", values[i]))).thenReturn(parsedQuery);
        DirectoryReader wrappedDirectoryReader = wrapper.apply(directoryReader);
        IndexSearcher indexSearcher = new ContextIndexSearcher(wrappedDirectoryReader, IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), true);
        int expectedHitCount = valuesHitCount[i];
        logger.info("Going to verify hit count with query [{}] with expected total hits [{}]", parsedQuery.query(), expectedHitCount);
        TotalHitCountCollector countCollector = new TotalHitCountCollector();
        // A match-all through the DLS-wrapped reader must only see the permitted subset.
        indexSearcher.search(new MatchAllDocsQuery(), countCollector);
        assertThat(countCollector.getTotalHits(), equalTo(expectedHitCount));
        assertThat(wrappedDirectoryReader.numDocs(), equalTo(expectedHitCount));
    }
    bitsetCache.close();
    directoryReader.close();
    directory.close();
}
301687.547126elasticsearch
/**
 * DLS with a limited-by role: the effective document visibility is the base role's
 * queries on {@code f2}, optionally intersected with a limiting role's query on
 * {@code f1}. Three documents are indexed; the test asserts which Lucene doc ids
 * (0-based) remain visible for each randomized permission shape.
 */
public void testDLSWithLimitedPermissions() throws Exception {
    ShardId shardId = new ShardId("_index", "_na_", 0);
    MappingLookup mappingLookup = createMappingLookup(List.of(new KeywordFieldType("field"), new KeywordFieldType("f1"), new KeywordFieldType("f2")));
    ScriptService scriptService = mock(ScriptService.class);
    final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext);
    final Authentication authentication = AuthenticationTestHelper.builder().build();
    new AuthenticationContextSerializer().writeToContext(authentication, threadContext);
    // Randomize the permission shape: no limiting role at all, or a limiting role that is
    // either permissive (fv11, fv21, fv31) or restrictive (fv11, fv31 only).
    final boolean noFilteredIndexPermissions = randomBoolean();
    boolean restrictiveLimitedIndexPermissions = false;
    if (noFilteredIndexPermissions == false) {
        restrictiveLimitedIndexPermissions = randomBoolean();
    }
    // Base role: two term queries on f2, OR-ed together (fv22 matches doc2, fv32 matches doc3).
    Set<BytesReference> queries = new HashSet<>();
    queries.add(new BytesArray("{\"terms\" : { \"f2\" : [\"fv22\"] } }"));
    queries.add(new BytesArray("{\"terms\" : { \"f2\" : [\"fv32\"] } }"));
    IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.filteredBy(queries));
    // Limiting role: a single terms query on f1 (restrictive variant excludes fv21 / doc2... note
    // doc ids are 0-based so doc2 is id 1).
    queries = singleton(new BytesArray("{\"terms\" : { \"f1\" : [\"fv11\", \"fv21\", \"fv31\"] } }"));
    if (restrictiveLimitedIndexPermissions) {
        queries = singleton(new BytesArray("{\"terms\" : { \"f1\" : [\"fv11\", \"fv31\"] } }"));
    }
    IndicesAccessControl.IndexAccessControl limitedIndexAccessControl = new IndicesAccessControl.IndexAccessControl(FieldPermissions.DEFAULT, DocumentPermissions.filteredBy(queries));
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.builder().put(IndexSettings.ALLOW_UNMAPPED.getKey(), false).build());
    Client client = mock(Client.class);
    when(client.settings()).thenReturn(Settings.EMPTY);
    final long nowInMillis = randomNonNegativeLong();
    SearchExecutionContext realSearchExecutionContext = new SearchExecutionContext(shardId.id(), 0, indexSettings, null, null, null, mappingLookup, null, null, parserConfig(), writableRegistry(), client, null, () -> nowInMillis, null, null, () -> true, null, emptyMap(), MapperMetrics.NOOP);
    SearchExecutionContext searchExecutionContext = spy(realSearchExecutionContext);
    DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY, Executors.newSingleThreadExecutor());
    final MockLicenseState licenseState = mock(MockLicenseState.class);
    when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true);
    // The wrapper's access control reflects the randomized permission shape above: either the
    // base role alone, or the base role limited by the limiting role.
    SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper(s -> searchExecutionContext, bitsetCache, securityContext, licenseState, scriptService) {

        @Override
        protected IndicesAccessControl getIndicesAccessControl() {
            IndicesAccessControl indicesAccessControl = new IndicesAccessControl(true, singletonMap("_index", indexAccessControl));
            if (noFilteredIndexPermissions) {
                return indicesAccessControl;
            }
            IndicesAccessControl limitedByIndicesAccessControl = new IndicesAccessControl(true, singletonMap("_index", limitedIndexAccessControl));
            return indicesAccessControl.limitIndicesAccessControl(limitedByIndicesAccessControl);
        }
    };
    Directory directory = newDirectory();
    IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE));
    // doc id 0: f1=fv11, f2=fv12 — never matches the base role's f2 filter.
    Document doc1 = new Document();
    doc1.add(new StringField("f1", "fv11", Store.NO));
    doc1.add(new StringField("f2", "fv12", Store.NO));
    iw.addDocument(doc1);
    // doc id 1: f1=fv21, f2=fv22 — matches base role; excluded by the restrictive limiting role.
    Document doc2 = new Document();
    doc2.add(new StringField("f1", "fv21", Store.NO));
    doc2.add(new StringField("f2", "fv22", Store.NO));
    iw.addDocument(doc2);
    // doc id 2: f1=fv31, f2=fv32 — matches base role and both limiting-role variants.
    Document doc3 = new Document();
    doc3.add(new StringField("f1", "fv31", Store.NO));
    doc3.add(new StringField("f2", "fv32", Store.NO));
    iw.addDocument(doc3);
    iw.commit();
    iw.close();
    DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
    DirectoryReader wrappedDirectoryReader = wrapper.apply(directoryReader);
    IndexSearcher indexSearcher = new ContextIndexSearcher(wrappedDirectoryReader, IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), true);
    ScoreDoc[] hits = indexSearcher.search(new MatchAllDocsQuery(), 1000).scoreDocs;
    Set<Integer> actualDocIds = new HashSet<>();
    for (ScoreDoc doc : hits) {
        actualDocIds.add(doc.doc);
    }
    if (noFilteredIndexPermissions) {
        // Base role alone: docs 1 and 2 match the f2 filters.
        assertThat(actualDocIds, containsInAnyOrder(1, 2));
    } else {
        if (restrictiveLimitedIndexPermissions) {
            // Restrictive limiting role drops doc 1 (f1=fv21 not in the terms list).
            assertThat(actualDocIds, containsInAnyOrder(2));
        } else {
            // Permissive limiting role keeps the same set as the base role.
            assertThat(actualDocIds, containsInAnyOrder(1, 2));
        }
    }
    bitsetCache.close();
    directoryReader.close();
    directory.close();
}
30943.4421132elasticsearch
/**
 * Verifies that an enrich policy remains locked across the whole background
 * wait-for-completion cycle: through task submission, through a first task-API call
 * that times out, and through the retry — unlocking only after the task API finally
 * reports completion (either a normal response or a wrapped ResourceNotFoundException).
 */
public void testWaitForCompletionConditionRemainsLocked() throws Exception {
    String testPolicyName = "test_policy";
    String testTaskId = randomAlphaOfLength(10) + ":" + randomIntBetween(100, 300);
    // Randomly exercise both "task finished" signals from the task API.
    boolean completeWithResourceNotFound = randomBoolean();
    // Gates every mocked client response until the test has asserted the pre-response state.
    CountDownLatch clientBlockingLatch = new CountDownLatch(1);
    CountDownLatch secondGetTaskWasCalled = new CountDownLatch(1);
    // Two-party barrier: the mocked GetTask handler parks here so the test can step the
    // background polling one response at a time.
    CyclicBarrier getTaskActionBlockingBarrier = new CyclicBarrier(2);
    // First GetTask call fails with a timeout; subsequent calls complete the task.
    AtomicBoolean shouldGetTaskApiReturnTimeout = new AtomicBoolean(true);
    Client client = new NoOpClient(testThreadPool) {

        @Override
        protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action, Request request, ActionListener<Response> listener) {
            // The executor must internally submit with waitForCompletion=false; completion is
            // then tracked via the task API rather than the execute call itself.
            if (request instanceof InternalExecutePolicyAction.Request) {
                assertFalse(((InternalExecutePolicyAction.Request) request).isWaitForCompletion());
            }
            // All responses are delivered asynchronously on a generic pool thread.
            testThreadPool.generic().execute(() -> {
                try {
                    clientBlockingLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                if (TransportGetTaskAction.TYPE.equals(action)) {
                    // The timeout flag has already flipped, so this is the retry call.
                    if (shouldGetTaskApiReturnTimeout.get() == false) {
                        secondGetTaskWasCalled.countDown();
                    }
                    try {
                        getTaskActionBlockingBarrier.await();
                    } catch (InterruptedException | BrokenBarrierException e) {
                        throw new RuntimeException(e);
                    }
                    if (shouldGetTaskApiReturnTimeout.getAndSet(false)) {
                        listener.onFailure(new ElasticsearchTimeoutException("Test call has timed out"));
                    } else if (completeWithResourceNotFound) {
                        // A wrapped ResourceNotFoundException is another valid completion signal.
                        listener.onFailure(new ElasticsearchException("Test wrapping", new ResourceNotFoundException("test")));
                    } else {
                        listener.onResponse(null);
                    }
                } else if (InternalExecutePolicyAction.INSTANCE.equals(action)) {
                    @SuppressWarnings("unchecked")
                    Response response = (Response) new ExecuteEnrichPolicyAction.Response(new TaskId(testTaskId));
                    listener.onResponse(response);
                } else {
                    listener.onResponse(null);
                }
            });
        }
    };
    final EnrichPolicyLocks enrichPolicyLocks = new EnrichPolicyLocks();
    final EnrichPolicyExecutor testExecutor = new EnrichPolicyExecutor(Settings.EMPTY, null, null, client, testThreadPool, TestIndexNameExpressionResolver.newInstance(testThreadPool.getThreadContext()), enrichPolicyLocks, ESTestCase::randomNonNegativeLong);
    PlainActionFuture<ExecuteEnrichPolicyAction.Response> firstTaskResult = new PlainActionFuture<>();
    testExecutor.coordinatePolicyExecution(new ExecuteEnrichPolicyAction.Request(testPolicyName).setWaitForCompletion(false), firstTaskResult);
    // Check 1: the policy must be locked immediately after submission, before any response.
    // On failure, unblock the mocked client and drain the barrier so threads aren't left hanging.
    if (enrichPolicyLocks.lockedPolices().contains(testPolicyName) == false) {
        clientBlockingLatch.countDown();
        try {
            firstTaskResult.get(3, TimeUnit.SECONDS);
        } catch (Exception e) {
            logger.error("Encountered ignorable exception during test cleanup");
        }
        try {
            getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
            getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
        } catch (InterruptedException | BrokenBarrierException | TimeoutException e) {
            logger.error("Encountered ignorable barrier wait exception during test cleanup");
        }
        fail("Enrich policy was not locked during task submission when it should have been");
    }
    // Let the mocked client start responding.
    clientBlockingLatch.countDown();
    try {
        // The execute response carries only the task id (no status) because waitForCompletion=false.
        ExecuteEnrichPolicyAction.Response response = firstTaskResult.actionGet();
        assertThat(response.getStatus(), is(nullValue()));
        assertThat(response.getTaskId(), is(notNullValue()));
    } catch (AssertionError e) {
        try {
            getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
            getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
        } catch (InterruptedException | BrokenBarrierException | TimeoutException be) {
            logger.error("Encountered ignorable barrier wait exception during test cleanup");
        }
        throw e;
    }
    // Check 2: still locked after the execute response — the background GetTask poll is pending.
    if (enrichPolicyLocks.lockedPolices().contains(testPolicyName) == false) {
        try {
            getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
            getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
        } catch (InterruptedException | BrokenBarrierException | TimeoutException e) {
            logger.error("Encountered ignorable barrier wait exception during test cleanup");
        }
        fail("Enrich policy was not locked after task response when it should have been");
    }
    // Release the first GetTask call, which responds with a timeout.
    try {
        getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
    } catch (BrokenBarrierException e) {
        throw new RuntimeException("Unexpected broken barrier exception", e);
    }
    // The executor must retry the task API after the timeout.
    try {
        assertTrue("Expected task API to be called a second time by the executor after first call timed out", secondGetTaskWasCalled.await(3, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        Assert.fail("Thread interrupted while waiting for background executor to call task API");
    }
    // Check 3: still locked after the timeout, while the retry is in flight.
    if (enrichPolicyLocks.lockedPolices().contains(testPolicyName) == false) {
        try {
            getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
        } catch (InterruptedException | BrokenBarrierException | TimeoutException e) {
            logger.error("Encountered ignorable barrier wait exception during test cleanup");
        }
        fail("Enrich policy was not locked after timeout when it should have been");
    }
    // Release the second GetTask call, which reports completion.
    try {
        getTaskActionBlockingBarrier.await(3, TimeUnit.SECONDS);
    } catch (BrokenBarrierException e) {
        throw new RuntimeException("Unexpected broken barrier exception", e);
    }
    // Only now, once the task is reported complete, may the lock be released.
    assertBusy(() -> assertFalse(enrichPolicyLocks.lockedPolices().contains(testPolicyName)), 3, TimeUnit.SECONDS);
}
301595.489121elasticsearch
/**
 * Indexes random keyword keys into field "g", then runs a driver pipeline —
 * Lucene source -> optional doc-id shuffle -> append constant 1 -> ordinals grouping
 * (INITIAL count) -> hash aggregation (FINAL count) — and asserts the per-key counts
 * match those computed while indexing. Also asserts all block memory is released.
 */
public void testGroupingWithOrdinals() throws Exception {
    DriverContext driverContext = driverContext();
    BlockFactory blockFactory = driverContext.blockFactory();
    final String gField = "g";
    final int numDocs = 2856;
    // Expected count per key, accumulated as documents are written.
    final Map<BytesRef, Long> expectedCounts = new HashMap<>();
    int keyLength = randomIntBetween(1, 10);
    try (BaseDirectoryWrapper dir = newDirectory();
        RandomIndexWriter writer = new RandomIndexWriter(random(), dir)) {
        for (int i = 0; i < numDocs; i++) {
            Document doc = new Document();
            BytesRef key = new BytesRef(randomByteArrayOfLength(keyLength));
            SortedSetDocValuesField docValuesField = new SortedSetDocValuesField(gField, key);
            doc.add(docValuesField);
            writer.addDocument(doc);
            expectedCounts.compute(key, (k, v) -> v == null ? 1 : v + 1);
        }
        writer.commit();
        Map<BytesRef, Long> actualCounts = new HashMap<>();
        // Optionally shuffle doc ids within each page so grouping cannot rely on doc order.
        boolean shuffleDocs = randomBoolean();
        Operator shuffleDocsOperator = new AbstractPageMappingOperator() {

            @Override
            protected Page process(Page page) {
                if (shuffleDocs == false) {
                    return page;
                }
                DocVector docVector = (DocVector) page.getBlock(0).asVector();
                int positionCount = docVector.getPositionCount();
                IntVector shards = null;
                IntVector segments = null;
                IntVector docs = null;
                try (IntVector.Builder shardsBuilder = blockFactory.newIntVectorBuilder(positionCount);
                    IntVector.Builder segmentsBuilder = blockFactory.newIntVectorBuilder(positionCount);
                    IntVector.Builder docsBuilder = blockFactory.newIntVectorBuilder(positionCount)) {
                    List<Integer> docIds = new ArrayList<>(positionCount);
                    for (int i = 0; i < positionCount; i++) {
                        // Shard/segment columns are copied as-is; only the doc ids get shuffled.
                        shardsBuilder.appendInt(docVector.shards().getInt(i));
                        segmentsBuilder.appendInt(docVector.segments().getInt(i));
                        docIds.add(docVector.docs().getInt(i));
                    }
                    shards = shardsBuilder.build();
                    segments = segmentsBuilder.build();
                    Collections.shuffle(docIds, random());
                    for (Integer d : docIds) {
                        docsBuilder.appendInt(d);
                    }
                    docs = docsBuilder.build();
                } finally {
                    // If `docs` never got built, one of the builds above threw: release the input
                    // vector plus whatever was already built. Otherwise the new vectors are owned
                    // by the replacement DocVector and only the input needs releasing.
                    if (docs == null) {
                        Releasables.closeExpectNoException(docVector, shards, segments);
                    } else {
                        Releasables.closeExpectNoException(docVector);
                    }
                }
                Block[] blocks = new Block[page.getBlockCount()];
                blocks[0] = new DocVector(shards, segments, docs, false).asBlock();
                for (int i = 1; i < blocks.length; i++) {
                    blocks[i] = page.getBlock(i);
                }
                return new Page(blocks);
            }

            @Override
            public String toString() {
                return "ShuffleDocs";
            }
        };
        try (DirectoryReader reader = writer.getReader()) {
            // Pipeline: lucene source -> shuffle -> append constant 1 (the value counted) ->
            // ordinals grouping (INITIAL) -> hash aggregation (FINAL) -> collect results.
            Driver driver = new Driver(driverContext, luceneOperatorFactory(reader, new MatchAllDocsQuery(), LuceneOperator.NO_LIMIT).get(driverContext), List.of(shuffleDocsOperator, new AbstractPageMappingOperator() {

                @Override
                protected Page process(Page page) {
                    return page.appendBlock(driverContext.blockFactory().newConstantIntBlockWith(1, page.getPositionCount()));
                }

                @Override
                public String toString() {
                    return "Add(1)";
                }
            }, new OrdinalsGroupingOperator(shardIdx -> new KeywordFieldMapper.KeywordFieldType("g").blockLoader(null), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), ElementType.BYTES_REF, 0, gField, List.of(CountAggregatorFunction.supplier(List.of(1)).groupingAggregatorFactory(INITIAL)), randomPageSize(), driverContext), new HashAggregationOperator(List.of(CountAggregatorFunction.supplier(List.of(1, 2)).groupingAggregatorFactory(FINAL)), () -> BlockHash.build(List.of(new BlockHash.GroupSpec(0, ElementType.BYTES_REF)), driverContext.blockFactory(), randomPageSize(), false), driverContext)), new PageConsumerOperator(page -> {
                BytesRefBlock keys = page.getBlock(0);
                LongBlock counts = page.getBlock(1);
                for (int i = 0; i < keys.getPositionCount(); i++) {
                    BytesRef spare = new BytesRef();
                    keys.getBytesRef(i, spare);
                    // Deep-copy: the BytesRef returned by the block is a reusable scratch buffer.
                    actualCounts.put(BytesRef.deepCopyOf(spare), counts.getLong(i));
                }
                page.releaseBlocks();
            }), () -> {
            });
            OperatorTestCase.runDriver(driver);
            assertThat(actualCounts, equalTo(expectedCounts));
            assertDriverContext(driverContext);
            org.elasticsearch.common.util.MockBigArrays.ensureAllArraysAreReleased();
        }
    }
    // All circuit-breaker-tracked memory must have been returned.
    assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
}
302505.051797elasticsearch
/**
 * Exercises {@code EnrichResultBuilder} for BYTES_REF values: feeds it random pages of
 * (position, values) pairs — including null and multi-valued entries — then verifies
 * {@code build(selected)} output both for a dense 0..maxPosition range selection and
 * for a random multi-valued/null selection block. Finally checks the circuit breaker
 * accounting returns to zero.
 */
public void testBytesRef() {
    BlockFactory blockFactory = blockFactory();
    // Ground truth: all values appended per position, in insertion order.
    Map<Integer, List<BytesRef>> inputValues = new HashMap<>();
    int numPages = between(0, 10);
    int maxPosition = between(0, 100);
    var resultBuilder = EnrichResultBuilder.enrichResultBuilder(ElementType.BYTES_REF, blockFactory, 0);
    for (int i = 0; i < numPages; i++) {
        int numRows = between(1, 100);
        try (var positionsBuilder = blockFactory.newIntVectorBuilder(numRows);
            var valuesBuilder = blockFactory.newBytesRefBlockBuilder(numRows)) {
            for (int r = 0; r < numRows; r++) {
                int position = between(0, maxPosition);
                positionsBuilder.appendInt(position);
                // 0 values -> null entry; 1 value -> plain append; >1 -> multi-valued entry.
                int numValues = between(0, 3);
                if (numValues == 0) {
                    valuesBuilder.appendNull();
                }
                if (numValues > 1) {
                    valuesBuilder.beginPositionEntry();
                }
                for (int v = 0; v < numValues; v++) {
                    BytesRef val = new BytesRef(randomByteArrayOfLength(10));
                    inputValues.computeIfAbsent(position, k -> new ArrayList<>()).add(val);
                    valuesBuilder.appendBytesRef(val);
                }
                if (numValues > 1) {
                    valuesBuilder.endPositionEntry();
                }
            }
            try (var positions = positionsBuilder.build();
                var valuesBlock = valuesBuilder.build()) {
                resultBuilder.addInputPage(positions, new Page(valuesBlock));
            }
        }
    }
    // Case 1: select every position 0..maxPosition; each output position must hold exactly
    // the values accumulated for it (or be null if none were added).
    try (IntVector selected = IntVector.range(0, maxPosition + 1, blockFactory)) {
        try (BytesRefBlock actualOutput = (BytesRefBlock) resultBuilder.build(selected.asBlock())) {
            assertThat(actualOutput.getPositionCount(), equalTo(maxPosition + 1));
            for (int i = 0; i < actualOutput.getPositionCount(); i++) {
                List<BytesRef> values = inputValues.get(i);
                if (actualOutput.isNull(i)) {
                    assertNull(values);
                } else {
                    int valueCount = actualOutput.getValueCount(i);
                    int first = actualOutput.getFirstValueIndex(i);
                    assertThat(valueCount, equalTo(values.size()));
                    for (int v = 0; v < valueCount; v++) {
                        assertThat(actualOutput.getBytesRef(first + v, new BytesRef()), equalTo(values.get(v)));
                    }
                }
            }
        }
    }
    // Case 2: a random selection block where each output position pulls from 0-3 input
    // positions (0 -> null entry); expected values are the concatenation of the inputs.
    try (IntBlock.Builder selectedBuilder = blockFactory.newIntBlockBuilder(between(1, 10))) {
        int selectedPositions = between(1, 100);
        Map<Integer, List<BytesRef>> expectedValues = new HashMap<>();
        for (int i = 0; i < selectedPositions; i++) {
            int ps = randomIntBetween(0, 3);
            List<BytesRef> values = new ArrayList<>();
            if (ps == 0) {
                selectedBuilder.appendNull();
            } else {
                selectedBuilder.beginPositionEntry();
                for (int p = 0; p < ps; p++) {
                    int position = randomIntBetween(0, maxPosition);
                    selectedBuilder.appendInt(position);
                    values.addAll(inputValues.getOrDefault(position, List.of()));
                }
                selectedBuilder.endPositionEntry();
            }
            // Selecting only positions with no accumulated values must still yield null.
            if (values.isEmpty()) {
                expectedValues.put(i, null);
            } else {
                expectedValues.put(i, values);
            }
        }
        try (var selected = selectedBuilder.build();
            BytesRefBlock actualOutput = (BytesRefBlock) resultBuilder.build(selected)) {
            assertThat(actualOutput.getPositionCount(), equalTo(selected.getPositionCount()));
            for (int i = 0; i < actualOutput.getPositionCount(); i++) {
                List<BytesRef> values = expectedValues.get(i);
                if (actualOutput.isNull(i)) {
                    assertNull(values);
                } else {
                    int valueCount = actualOutput.getValueCount(i);
                    int first = actualOutput.getFirstValueIndex(i);
                    assertThat(valueCount, equalTo(values.size()));
                    for (int v = 0; v < valueCount; v++) {
                        assertThat(actualOutput.getBytesRef(first + v, new BytesRef()), equalTo(values.get(v)));
                    }
                }
            }
        }
    }
    resultBuilder.close();
    // All breaker-tracked memory must be released once builder and blocks are closed.
    assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
}
303290.01099elasticsearch
/**
 * Sets up two indices, each managed by its own mock ILM step, forces exactly one of
 * the two steps to throw, and verifies that the runner still executes BOTH steps —
 * a failure on one index must not prevent processing of the other.
 *
 * @param useOnMaster true: trigger via {@code indexLifecycleService.onMaster(...)} with
 *                    async action steps; false: trigger via {@code triggerPolicies(...)}
 *                    with cluster-state action steps
 */
public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) {
    // --- index 1: policy, phase, and mock step ---
    String policy1 = randomAlphaOfLengthBetween(1, 20);
    Step.StepKey i1currentStepKey = randomStepKey();
    final Step i1mockStep;
    if (useOnMaster) {
        i1mockStep = new IndexLifecycleRunnerTests.MockAsyncActionStep(i1currentStepKey, randomStepKey());
    } else {
        i1mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i1currentStepKey, randomStepKey());
    }
    MockAction i1mockAction = new MockAction(Collections.singletonList(i1mockStep));
    Phase i1phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", i1mockAction));
    LifecyclePolicy i1policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i1phase.getName(), i1phase));
    Index index1 = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
    LifecycleExecutionState.Builder i1lifecycleState = LifecycleExecutionState.builder();
    i1lifecycleState.setPhase(i1currentStepKey.phase());
    i1lifecycleState.setAction(i1currentStepKey.action());
    i1lifecycleState.setStep(i1currentStepKey.name());
    // --- index 2: policy, phase, and mock step ---
    String policy2 = randomValueOtherThan(policy1, () -> randomAlphaOfLengthBetween(1, 20));
    Step.StepKey i2currentStepKey = randomStepKey();
    final Step i2mockStep;
    if (useOnMaster) {
        i2mockStep = new IndexLifecycleRunnerTests.MockAsyncActionStep(i2currentStepKey, randomStepKey());
    } else {
        i2mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i2currentStepKey, randomStepKey());
    }
    MockAction mockAction = new MockAction(Collections.singletonList(i2mockStep));
    Phase i2phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
    // FIX: the phase map previously held i1phase under i2phase's name (copy-paste), so
    // i2policy wrapped index 1's phase/step instead of its own.
    // NOTE(review): i2policy is still named policy1 while being registered under the
    // policy2 key below, and both indices point LIFECYCLE_NAME at policy1 — confirm intended.
    LifecyclePolicy i2policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i2phase.getName(), i2phase));
    Index index2 = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
    LifecycleExecutionState.Builder i2lifecycleState = LifecycleExecutionState.builder();
    i2lifecycleState.setPhase(i2currentStepKey.phase());
    i2lifecycleState.setAction(i2currentStepKey.action());
    i2lifecycleState.setStep(i2currentStepKey.name());
    // Exactly one of the two steps throws (chosen at random); the latch counts both executions.
    CountDownLatch stepLatch = new CountDownLatch(2);
    boolean failStep1 = randomBoolean();
    if (useOnMaster) {
        ((IndexLifecycleRunnerTests.MockAsyncActionStep) i1mockStep).setLatch(stepLatch);
        ((IndexLifecycleRunnerTests.MockAsyncActionStep) i1mockStep).setException(failStep1 ? new IllegalArgumentException("forcing a failure for index 1") : null);
        ((IndexLifecycleRunnerTests.MockAsyncActionStep) i2mockStep).setLatch(stepLatch);
        ((IndexLifecycleRunnerTests.MockAsyncActionStep) i2mockStep).setException(failStep1 ? null : new IllegalArgumentException("forcing a failure for index 2"));
    } else {
        ((IndexLifecycleRunnerTests.MockClusterStateActionStep) i1mockStep).setLatch(stepLatch);
        ((IndexLifecycleRunnerTests.MockClusterStateActionStep) i1mockStep).setException(failStep1 ? new IllegalArgumentException("forcing a failure for index 1") : null);
        // FIX: the two lines below previously configured i1mockStep again (copy-paste),
        // overwriting index 1's exception and leaving i2mockStep with no latch — the
        // CountDownLatch(2) could then never reach zero in this branch.
        ((IndexLifecycleRunnerTests.MockClusterStateActionStep) i2mockStep).setLatch(stepLatch);
        ((IndexLifecycleRunnerTests.MockClusterStateActionStep) i2mockStep).setException(failStep1 ? null : new IllegalArgumentException("forcing a failure for index 2"));
    }
    SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
    policyMap.put(policy1, new LifecyclePolicyMetadata(i1policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()));
    policyMap.put(policy2, new LifecyclePolicyMetadata(i2policy, Collections.emptyMap(), randomNonNegativeLong(), randomNonNegativeLong()));
    IndexMetadata i1indexMetadata = IndexMetadata.builder(index1.getName()).settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policy1)).putCustom(ILM_CUSTOM_METADATA_KEY, i1lifecycleState.build().asMap()).numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
    IndexMetadata i2indexMetadata = IndexMetadata.builder(index2.getName()).settings(settings(IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policy1)).putCustom(ILM_CUSTOM_METADATA_KEY, i2lifecycleState.build().asMap()).numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
    Map<String, IndexMetadata> indices = Map.of(index1.getName(), i1indexMetadata, index2.getName(), i2indexMetadata);
    Metadata metadata = Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING)).indices(indices).persistentSettings(settings(IndexVersion.current()).build()).build();
    ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()).build();
    if (useOnMaster) {
        when(clusterService.state()).thenReturn(currentState);
        indexLifecycleService.onMaster(currentState);
    } else {
        indexLifecycleService.triggerPolicies(currentState, randomBoolean());
    }
    try {
        // FIX: await()'s boolean result was previously ignored, so a timeout (a step
        // that never ran) went undetected and the test passed vacuously.
        assertTrue("both steps should have been executed within the timeout", stepLatch.await(5, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        logger.error("failure while waiting for step execution", e);
        fail("both steps should have been executed, even with an exception");
    }
}
304004.77499elasticsearch
/**
 * Verifies that updating a lifecycle policy without changing the index's current phase
 * leaves the steps cached for that index unchanged: after the update, the registry's
 * global step map reflects the new policy (shrink to 2 shards) while the step resolved
 * for the index — whose phase definition was captured before the update — still carries
 * the old value (shrink to 1 shard).
 */
public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Exception {
    Index index = new Index("test", "uuid");
    Client client = mock(Client.class);
    Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
    String policyName = randomAlphaOfLength(5);
    // Original policy: a "warm" phase whose shrink action targets 1 shard.
    Map<String, LifecycleAction> actions = new HashMap<>();
    actions.put("shrink", new ShrinkAction(1, null, false));
    Map<String, Phase> phases = new HashMap<>();
    Phase warmPhase = new Phase("warm", TimeValue.ZERO, actions);
    // Serialized phase-execution info recorded on the index; this snapshot is what pins
    // the index to the pre-update phase definition.
    PhaseExecutionInfo pei = new PhaseExecutionInfo(policyName, warmPhase, 1, randomNonNegativeLong());
    String phaseJson = Strings.toString(pei);
    phases.put("warm", new Phase("warm", TimeValue.ZERO, actions));
    LifecyclePolicy newPolicy = new LifecyclePolicy(policyName, phases);
    // Updated policy: same name and phase, but shrink now targets 2 shards.
    actions = new HashMap<>();
    actions.put("shrink", new ShrinkAction(2, null, false));
    phases = new HashMap<>();
    phases.put("warm", new Phase("warm", TimeValue.ZERO, actions));
    LifecyclePolicy updatedPolicy = new LifecyclePolicy(policyName, phases);
    logger.info("--> policy: {}", newPolicy);
    logger.info("--> updated policy: {}", updatedPolicy);
    List<Step> policySteps = newPolicy.toSteps(client, null);
    // Optional random security headers, exercising header propagation through the metadata.
    Map<String, String> headers = new HashMap<>();
    if (randomBoolean()) {
        headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
        headers.put(randomAlphaOfLength(10), randomAlphaOfLength(10));
    }
    Map<String, LifecyclePolicyMetadata> policyMap = Collections.singletonMap(newPolicy.getName(), new LifecyclePolicyMetadata(newPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()));
    IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING);
    // The index sits in the "warm" phase with the ORIGINAL (1-shard) phase definition baked in.
    LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
    lifecycleState.setPhase("warm");
    lifecycleState.setPhaseDefinition(phaseJson);
    Metadata metadata = Metadata.builder().persistentSettings(settings(IndexVersion.current()).build()).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata).put(IndexMetadata.builder("test").settings(indexSettings(1, 0).put("index.uuid", "uuid").put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).put(LifecycleSettings.LIFECYCLE_NAME, policyName)).putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())).build();
    // Dump the metadata for debugging; also verifies it serializes cleanly.
    try (XContentBuilder builder = JsonXContent.contentBuilder()) {
        builder.startObject();
        ChunkedToXContent.wrapAsToXContent(metadata).toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        logger.info("--> metadata: {}", Strings.toString(builder));
    }
    String nodeId = randomAlphaOfLength(10);
    DiscoveryNode masterNode = DiscoveryNodeUtils.builder(nodeId).applySettings(NodeRoles.masterNode(settings(IndexVersion.current()).build())).address(new TransportAddress(TransportAddress.META_ADDRESS, 9300)).build();
    ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()).build();
    // Load the original policy into the registry and check both the registry-wide step
    // and the index-resolved step report 1 shard.
    PolicyStepsRegistry registry = new PolicyStepsRegistry(REGISTRY, client, null);
    registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE));
    Map<Step.StepKey, Step> registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName());
    Step shrinkStep = registeredStepsForPolicy.entrySet().stream().filter(e -> e.getKey().phase().equals("warm") && e.getKey().name().equals("shrink")).findFirst().get().getValue();
    Step gotStep = registry.getStep(metadata.index(index), shrinkStep.getKey());
    assertThat(((ShrinkStep) shrinkStep).getNumberOfShards(), equalTo(1));
    assertThat(((ShrinkStep) gotStep).getNumberOfShards(), equalTo(1));
    // Swap in the updated (2-shard) policy without touching the index's lifecycle state.
    policyMap = Collections.singletonMap(updatedPolicy.getName(), new LifecyclePolicyMetadata(updatedPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()));
    lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING);
    metadata = Metadata.builder(metadata).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata).build();
    try (XContentBuilder builder = JsonXContent.contentBuilder()) {
        builder.startObject();
        ChunkedToXContent.wrapAsToXContent(metadata).toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        logger.info("--> metadata: {}", Strings.toString(builder));
    }
    currentState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build();
    registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE));
    registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName());
    shrinkStep = registeredStepsForPolicy.entrySet().stream().filter(e -> e.getKey().phase().equals("warm") && e.getKey().name().equals("shrink")).findFirst().get().getValue();
    gotStep = registry.getStep(metadata.index(index), shrinkStep.getKey());
    // Registry-wide step follows the update (2 shards)...
    assertThat(((ShrinkStep) shrinkStep).getNumberOfShards(), equalTo(2));
    // ...but the index-resolved step still uses the cached phase definition (1 shard).
    assertThat(((ShrinkStep) gotStep).getNumberOfShards(), equalTo(1));
}
302206.751123elasticsearch
/**
 * Exercises the config provider's update path: stores an initial config, then applies a
 * sequence of updates (description only, memory limit only, no-op, both fields, headers)
 * and checks the cumulative result after each one.
 */
public void testUpdate() throws Exception {
    String configId = "config-id";
    DataFrameAnalyticsConfig initialConfig = DataFrameAnalyticsConfigTests.createRandom(configId);
    {
        // Seed the provider with the initial config and confirm it round-trips.
        AtomicReference<DataFrameAnalyticsConfig> stored = new AtomicReference<>();
        AtomicReference<Exception> error = new AtomicReference<>();
        blockingCall(listener -> configProvider.put(initialConfig, emptyMap(), TIMEOUT, listener), stored, error);
        assertNoException(error);
        assertThat(stored.get(), is(notNullValue()));
        assertThat(stored.get(), is(equalTo(initialConfig)));
    }
    {
        // Update the description only.
        AtomicReference<DataFrameAnalyticsConfig> updated = new AtomicReference<>();
        AtomicReference<Exception> error = new AtomicReference<>();
        DataFrameAnalyticsConfigUpdate update = new DataFrameAnalyticsConfigUpdate.Builder(configId).setDescription("description-1").build();
        blockingCall(listener -> configProvider.update(update, emptyMap(), ClusterState.EMPTY_STATE, listener), updated, error);
        assertNoException(error);
        assertThat(updated.get(), is(notNullValue()));
        DataFrameAnalyticsConfig expected = new DataFrameAnalyticsConfig.Builder(initialConfig).setDescription("description-1").build();
        assertThat(updated.get(), is(equalTo(expected)));
    }
    {
        // Update the model memory limit only; the earlier description change must persist.
        AtomicReference<DataFrameAnalyticsConfig> updated = new AtomicReference<>();
        AtomicReference<Exception> error = new AtomicReference<>();
        DataFrameAnalyticsConfigUpdate update = new DataFrameAnalyticsConfigUpdate.Builder(configId).setModelMemoryLimit(ByteSizeValue.ofBytes(1024)).build();
        blockingCall(listener -> configProvider.update(update, emptyMap(), ClusterState.EMPTY_STATE, listener), updated, error);
        assertNoException(error);
        assertThat(updated.get(), is(notNullValue()));
        DataFrameAnalyticsConfig expected = new DataFrameAnalyticsConfig.Builder(initialConfig).setDescription("description-1").setModelMemoryLimit(ByteSizeValue.ofBytes(1024)).build();
        assertThat(updated.get(), is(equalTo(expected)));
    }
    {
        // An empty update must leave the config exactly as it was.
        AtomicReference<DataFrameAnalyticsConfig> updated = new AtomicReference<>();
        AtomicReference<Exception> error = new AtomicReference<>();
        DataFrameAnalyticsConfigUpdate update = new DataFrameAnalyticsConfigUpdate.Builder(configId).build();
        blockingCall(listener -> configProvider.update(update, emptyMap(), ClusterState.EMPTY_STATE, listener), updated, error);
        assertNoException(error);
        assertThat(updated.get(), is(notNullValue()));
        DataFrameAnalyticsConfig expected = new DataFrameAnalyticsConfig.Builder(initialConfig).setDescription("description-1").setModelMemoryLimit(ByteSizeValue.ofBytes(1024)).build();
        assertThat(updated.get(), is(equalTo(expected)));
    }
    {
        // Update both fields at once.
        AtomicReference<DataFrameAnalyticsConfig> updated = new AtomicReference<>();
        AtomicReference<Exception> error = new AtomicReference<>();
        DataFrameAnalyticsConfigUpdate update = new DataFrameAnalyticsConfigUpdate.Builder(configId).setDescription("description-2").setModelMemoryLimit(ByteSizeValue.ofBytes(2048)).build();
        blockingCall(listener -> configProvider.update(update, emptyMap(), ClusterState.EMPTY_STATE, listener), updated, error);
        assertNoException(error);
        assertThat(updated.get(), is(notNullValue()));
        DataFrameAnalyticsConfig expected = new DataFrameAnalyticsConfig.Builder(initialConfig).setDescription("description-2").setModelMemoryLimit(ByteSizeValue.ofBytes(2048)).build();
        assertThat(updated.get(), is(equalTo(expected)));
    }
    {
        // An empty update carrying security headers must attach those headers to the config.
        Map<String, String> securityHeaders = Collections.singletonMap("_xpack_security_authentication", dummyAuthenticationHeader);
        AtomicReference<DataFrameAnalyticsConfig> updated = new AtomicReference<>();
        AtomicReference<Exception> error = new AtomicReference<>();
        DataFrameAnalyticsConfigUpdate update = new DataFrameAnalyticsConfigUpdate.Builder(configId).build();
        blockingCall(listener -> configProvider.update(update, securityHeaders, ClusterState.EMPTY_STATE, listener), updated, error);
        assertNoException(error);
        assertThat(updated.get(), is(notNullValue()));
        DataFrameAnalyticsConfig expected = new DataFrameAnalyticsConfig.Builder(initialConfig).setDescription("description-2").setModelMemoryLimit(ByteSizeValue.ofBytes(2048)).setHeaders(securityHeaders).build();
        assertThat(updated.get(), is(equalTo(expected)));
    }
}
301902.4411118elasticsearch
/**
 * Starts an ML model-snapshot upgrade as a persistent task.
 *
 * Pre-flight checks (each short-circuits via {@code listener.onFailure}):
 * license allows ML; no cluster upgrade in progress; no other snapshot upgrade already
 * running for this job. The async work then runs as a chain of listeners that are
 * DECLARED in reverse execution order below; actual order is:
 * get job -> get snapshot -> refresh memory requirement -> ensure config index mapping
 * -> send persistent-task start request -> wait for the task to start.
 */
protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) {
    if (MachineLearningField.ML_API_FEATURE.check(licenseState) == false) {
        listener.onFailure(LicenseUtils.newComplianceException(XPackField.MACHINE_LEARNING));
        return;
    }
    // Refuse to start while a rolling cluster upgrade is underway.
    if (TransportVersionUtils.isMinTransportVersionSameAsCurrent(state) == false) {
        listener.onFailure(ExceptionsHelper.conflictStatusException("Cannot upgrade job [{}] snapshot [{}] while cluster upgrade is in progress.", request.getJobId(), request.getSnapshotId()));
        return;
    }
    // Only one snapshot upgrade per job at a time: look for an existing upgrade task for this job.
    PersistentTasksCustomMetadata customMetadata = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE);
    if (customMetadata != null && (customMetadata.findTasks(MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, t -> t.getParams() instanceof SnapshotUpgradeTaskParams && ((SnapshotUpgradeTaskParams) t.getParams()).getJobId().equals(request.getJobId())).isEmpty() == false)) {
        listener.onFailure(ExceptionsHelper.conflictStatusException("Cannot upgrade job [{}] snapshot [{}] as there is currently a snapshot for this job being upgraded", request.getJobId(), request.getSnapshotId()));
        return;
    }
    final SnapshotUpgradeTaskParams params = new SnapshotUpgradeTaskParams(request.getJobId(), request.getSnapshotId());
    // Final step: once the persistent task is created, wait for it to reach a started state.
    // A ResourceAlreadyExistsException here means a concurrent request won the race.
    ActionListener<PersistentTask<SnapshotUpgradeTaskParams>> waitForJobToStart = ActionListener.wrap(persistentTask -> waitForJobStarted(persistentTask.getId(), params, request, listener), e -> {
        if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) {
            e = ExceptionsHelper.conflictStatusException("Cannot upgrade job [{}] snapshot [{}] because upgrade is already in progress", e, request.getJobId(), request.getSnapshotId());
        }
        listener.onFailure(e);
    });
    // After the config index mapping is confirmed, submit the persistent-task start request.
    ActionListener<Boolean> configIndexMappingUpdaterListener = ActionListener.wrap(_unused -> {
        logger.info("[{}] [{}] sending start upgrade request", params.getJobId(), params.getSnapshotId());
        persistentTasksService.sendStartRequest(MlTasks.snapshotUpgradeTaskId(params.getJobId(), params.getSnapshotId()), MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, params, null, waitForJobToStart);
    }, listener::onFailure);
    // After memory requirements are refreshed, make sure the ML config index mapping is current.
    ActionListener<Long> memoryRequirementRefreshListener = ActionListener.wrap(mem -> ElasticsearchMappings.addDocMappingIfMissing(MlConfigIndex.indexName(), MlConfigIndex::mapping, client, state, request.masterNodeTimeout(), configIndexMappingUpdaterListener, MlConfigIndex.CONFIG_INDEX_MAPPINGS_VERSION), listener::onFailure);
    // Validate the snapshot: it must exist and must not already be at the current config version.
    ActionListener<Result<ModelSnapshot>> getSnapshotHandler = ActionListener.wrap(response -> {
        if (response == null) {
            listener.onFailure(new ResourceNotFoundException(Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, request.getSnapshotId(), request.getJobId())));
            return;
        }
        if (MlConfigVersion.CURRENT.equals(response.result.getMinVersion())) {
            listener.onFailure(ExceptionsHelper.conflictStatusException("Cannot upgrade job [{}] snapshot [{}] as it is already compatible with current version {}", request.getJobId(), request.getSnapshotId(), MlConfigVersion.CURRENT));
            return;
        }
        memoryTracker.refreshAnomalyDetectorJobMemoryAndAllOthers(params.getJobId(), memoryRequirementRefreshListener);
    }, listener::onFailure);
    // Validate the job: refuse to upgrade the job's CURRENT primary snapshot unless the job is closed.
    ActionListener<Job> getJobHandler = ActionListener.wrap(job -> {
        if (request.getSnapshotId().equals(job.getModelSnapshotId()) && (JobState.CLOSED.equals(MlTasks.getJobState(request.getJobId(), customMetadata)) == false)) {
            listener.onFailure(ExceptionsHelper.conflictStatusException("Cannot upgrade snapshot [{}] for job [{}] as it is the current primary job snapshot and the job's state is [{}]", request.getSnapshotId(), request.getJobId(), MlTasks.getJobState(request.getJobId(), customMetadata)));
            return;
        }
        jobResultsProvider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), false, getSnapshotHandler::onResponse, getSnapshotHandler::onFailure);
    }, listener::onFailure);
    // Kick off the chain by fetching the job config.
    jobConfigProvider.getJob(request.getJobId(), null, ActionListener.wrap(builder -> getJobHandler.onResponse(builder.build()), listener::onFailure));
}
302538.1616100elasticsearch
/**
 * Expands a comma-separated trained-model id expression (supporting wildcards) into the
 * set of matching model ids, combining three sources: model aliases, models bundled as
 * resources, and models stored in the inference index (found via an async search).
 *
 * On success the listener receives a tuple of (total hit count, map of model id to the
 * set of aliases that refer to it). Fails with a missing-model error if any required
 * token matched nothing, unless {@code allowNoResources} is set.
 */
public void expandIds(String idExpression, boolean allowNoResources, PageParams pageParams, Set<String> tags, ModelAliasMetadata modelAliasMetadata, @Nullable TaskId parentTaskId, Set<String> previouslyMatchedIds, ActionListener<Tuple<Long, Map<String, Set<String>>>> idsListener) {
    String[] tokens = Strings.tokenizeToStringArray(idExpression, ",");
    // Resolve aliases first: each token (literal or wildcard) may name an alias that
    // maps to a concrete model id. Skipped for "_all"/"*" since those match everything anyway.
    Set<String> expandedIdsFromAliases = new HashSet<>();
    if (Strings.isAllOrWildcard(tokens) == false) {
        for (String token : tokens) {
            if (Regex.isSimpleMatchPattern(token)) {
                for (String modelAlias : modelAliasMetadata.modelAliases().keySet()) {
                    if (Regex.simpleMatch(token, modelAlias)) {
                        expandedIdsFromAliases.add(modelAliasMetadata.getModelId(modelAlias));
                    }
                }
            } else if (modelAliasMetadata.getModelId(token) != null) {
                expandedIdsFromAliases.add(modelAliasMetadata.getModelId(token));
            }
        }
    }
    // Resource-backed models matching the tokens; when tags were requested, keep only
    // resources whose model carries ALL of the requested tags.
    Set<String> matchedResourceIds = matchedResourceIds(tokens);
    Set<String> foundResourceIds;
    if (tags.isEmpty()) {
        foundResourceIds = matchedResourceIds;
    } else {
        foundResourceIds = new HashSet<>();
        for (String resourceId : matchedResourceIds) {
            if (Sets.newHashSet(loadModelFromResource(resourceId, true).build().getTags()).containsAll(tags)) {
                foundResourceIds.add(resourceId);
            }
        }
    }
    // Query the index with the original tokens plus the alias-resolved ids.
    expandedIdsFromAliases.addAll(Arrays.asList(tokens));
    String[] tokensForQuery = expandedIdsFromAliases.toArray(new String[0]);
    // from/size are adjusted because resource-backed ids fill the front of the page.
    SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().sort(SortBuilders.fieldSort(TrainedModelConfig.MODEL_ID.getPreferredName()).unmappedType("long")).query(buildExpandIdsQuery(tokensForQuery, tags)).from(Math.max(0, pageParams.getFrom() - foundResourceIds.size())).size(Math.min(10_000, pageParams.getSize() + foundResourceIds.size()));
    // Only the model id field is needed from each hit.
    sourceBuilder.trackTotalHits(true).fetchSource(TrainedModelConfig.MODEL_ID.getPreferredName(), null);
    // ignore_unavailable is forced on so a missing inference index is not an error.
    IndicesOptions indicesOptions = SearchRequest.DEFAULT_INDICES_OPTIONS;
    SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN).indicesOptions(IndicesOptions.fromOptions(true, indicesOptions.allowNoIndices(), indicesOptions.expandWildcardsOpen(), indicesOptions.expandWildcardsClosed(), indicesOptions)).source(sourceBuilder);
    if (parentTaskId != null) {
        searchRequest.setParentTask(parentTaskId);
    }
    executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, ActionListener.<SearchResponse>wrap(response -> {
        // Total = index hits plus the resource-backed models we already matched.
        long totalHitCount = response.getHits().getTotalHits().value + foundResourceIds.size();
        Set<String> foundFromDocs = new HashSet<>();
        for (SearchHit hit : response.getHits().getHits()) {
            Map<String, Object> docSource = hit.getSourceAsMap();
            if (docSource == null) {
                continue;
            }
            Object idValue = docSource.get(TrainedModelConfig.MODEL_ID.getPreferredName());
            if (idValue instanceof String) {
                foundFromDocs.add(idValue.toString());
            }
        }
        // Merge doc- and resource-backed ids into one page, then attach the aliases
        // that point at each surviving model id.
        Map<String, Set<String>> allFoundIds = collectIds(pageParams, foundResourceIds, foundFromDocs).stream().collect(Collectors.toMap(Function.identity(), k -> new HashSet<>()));
        Set<String> matchedTokens = new HashSet<>(allFoundIds.keySet());
        modelAliasMetadata.modelAliases().forEach((alias, modelIdEntry) -> {
            final String modelId = modelIdEntry.getModelId();
            if (allFoundIds.containsKey(modelId)) {
                allFoundIds.get(modelId).add(alias);
                matchedTokens.add(alias);
            }
        });
        // Every non-wildcard token must have matched something (here or in a previous
        // call), otherwise report the unmatched ids as missing.
        ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoResources);
        requiredMatches.filterMatchedIds(matchedTokens);
        requiredMatches.filterMatchedIds(previouslyMatchedIds);
        if (requiredMatches.hasUnmatchedIds()) {
            idsListener.onFailure(ExceptionsHelper.missingTrainedModel(requiredMatches.unmatchedIdsString()));
        } else {
            idsListener.onResponse(Tuple.tuple(totalHitCount, allFoundIds));
        }
    }, idsListener::onFailure), client::search);
}
303053.097104elasticsearch
/**
 * Registers the ML REST endpoints. Returns nothing when the plugin is disabled;
 * otherwise the always-available endpoints plus the feature groups enabled by the
 * ML extension (anomaly detection, trained models, data frame analytics, NLP).
 */
public List<RestHandler> getRestHandlers(Settings unused, NamedWriteableRegistry namedWriteableRegistry, RestController restController, ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<DiscoveryNodes> nodesInCluster, Predicate<NodeFeature> clusterSupportsFeature) {
    if (false == enabled) {
        return List.of();
    }
    List<RestHandler> handlers = new ArrayList<>();
    // Always available when the plugin is enabled.
    handlers.addAll(List.of(new RestMlInfoAction(), new RestMlMemoryAction(), new RestSetUpgradeModeAction()));
    if (machineLearningExtension.get().isAnomalyDetectionEnabled()) {
        // Anomaly detection: jobs, results, model snapshots, datafeeds, calendars, filters.
        handlers.addAll(
            List.of(
                new RestGetJobsAction(),
                new RestGetJobStatsAction(),
                new RestPutJobAction(),
                new RestPostJobUpdateAction(),
                new RestDeleteJobAction(),
                new RestOpenJobAction(),
                new RestGetFiltersAction(),
                new RestPutFilterAction(),
                new RestUpdateFilterAction(),
                new RestDeleteFilterAction(),
                new RestGetInfluencersAction(),
                new RestGetRecordsAction(),
                new RestGetBucketsAction(),
                new RestGetOverallBucketsAction(),
                new RestPostDataAction(),
                new RestCloseJobAction(),
                new RestFlushJobAction(),
                new RestResetJobAction(),
                new RestValidateDetectorAction(),
                new RestValidateJobConfigAction(),
                new RestEstimateModelMemoryAction(),
                new RestGetCategoriesAction(),
                new RestGetModelSnapshotsAction(),
                new RestRevertModelSnapshotAction(),
                new RestUpdateModelSnapshotAction(),
                new RestGetDatafeedsAction(),
                new RestGetDatafeedStatsAction(),
                new RestPutDatafeedAction(),
                new RestUpdateDatafeedAction(),
                new RestDeleteDatafeedAction(),
                new RestPreviewDatafeedAction(),
                new RestStartDatafeedAction(),
                new RestStopDatafeedAction(),
                new RestDeleteModelSnapshotAction(),
                new RestForecastJobAction(),
                new RestDeleteForecastAction(),
                new RestGetCalendarsAction(),
                new RestPutCalendarAction(),
                new RestDeleteCalendarAction(),
                new RestDeleteCalendarEventAction(),
                new RestDeleteCalendarJobAction(),
                new RestPutCalendarJobAction(),
                new RestGetCalendarEventsAction(),
                new RestPostCalendarEventAction(),
                new RestUpgradeJobModelSnapshotAction(),
                new RestGetJobModelSnapshotsUpgradeStatsAction(),
                new RestDeleteExpiredDataAction(),
                new RestCatJobsAction(),
                new RestCatDatafeedsAction()
            )
        );
    }
    if (machineLearningExtension.get().isDataFrameAnalyticsEnabled() || machineLearningExtension.get().isNlpEnabled()) {
        // Trained-model endpoints shared by data frame analytics and NLP.
        handlers.addAll(
            List.of(
                new RestGetTrainedModelsAction(),
                new RestDeleteTrainedModelAction(),
                new RestGetTrainedModelsStatsAction(),
                new RestPutTrainedModelAction(),
                new RestPutTrainedModelAliasAction(),
                new RestDeleteTrainedModelAliasAction(),
                new RestPutTrainedModelDefinitionPartAction(),
                new RestInferTrainedModelAction(),
                new RestCatTrainedModelsAction()
            )
        );
        if (machineLearningExtension.get().isDataFrameAnalyticsEnabled()) {
            handlers.addAll(
                List.of(
                    new RestGetDataFrameAnalyticsAction(),
                    new RestGetDataFrameAnalyticsStatsAction(),
                    new RestPutDataFrameAnalyticsAction(),
                    new RestPostDataFrameAnalyticsUpdateAction(),
                    new RestDeleteDataFrameAnalyticsAction(),
                    new RestStartDataFrameAnalyticsAction(),
                    new RestStopDataFrameAnalyticsAction(),
                    new RestEvaluateDataFrameAction(),
                    new RestExplainDataFrameAnalyticsAction(),
                    new RestPreviewDataFrameAnalyticsAction(),
                    new RestCatDataFrameAnalyticsAction()
                )
            );
        }
        if (machineLearningExtension.get().isNlpEnabled()) {
            handlers.addAll(
                List.of(
                    new RestStartTrainedModelDeploymentAction(),
                    new RestStopTrainedModelDeploymentAction(),
                    new RestInferTrainedModelDeploymentAction(),
                    new RestUpdateTrainedModelDeploymentAction(),
                    new RestPutTrainedModelVocabularyAction(),
                    new RestClearDeploymentCacheAction()
                )
            );
        }
    }
    return handlers;
}
301545.01151elasticsearch
/**
 * Checks checkIfJobsCanBeMovedInLeastEfficientWay against a range of job/node layouts;
 * each case asserts the amount of memory (bytes) that cannot be placed on the modelled nodes.
 */
public void testCheckIfJobsCanBeMovedInLeastEfficientWayProcessorsAndMemory() {
    final long nodeMemory = 1000L;
    final int nodeProcessors = 4;
    final int defaultMaxJobs = MachineLearning.DEFAULT_MAX_OPEN_JOBS_PER_NODE;
    // Five identical small jobs (10 bytes, 1 processor each), reused by several cases.
    final List<MlJobRequirements> fiveSmallJobs = List.of(MlJobRequirements.of(10L, 1), MlJobRequirements.of(10L, 1), MlJobRequirements.of(10L, 1), MlJobRequirements.of(10L, 1), MlJobRequirements.of(10L, 1));
    // Five jobs with descending processor demands (4, 3, 2, 1, 0).
    final List<MlJobRequirements> descendingProcessorJobs = List.of(MlJobRequirements.of(10L, 4), MlJobRequirements.of(10L, 3), MlJobRequirements.of(10L, 2), MlJobRequirements.of(10L, 1), MlJobRequirements.of(10L, 0));

    // Single job fits on the lightly loaded node: nothing left over.
    assertEquals(0L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(List.of(MlJobRequirements.of(10L, 2)), Map.of("node_a", MlJobRequirements.of(100L, 2)), nodeMemory, 4, defaultMaxJobs));
    // Same layout but only 3 processors per node: the job cannot be placed.
    assertEquals(10L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(List.of(MlJobRequirements.of(10L, 2)), Map.of("node_a", MlJobRequirements.of(100L, 2)), nodeMemory, 3, defaultMaxJobs));
    // Node memory nearly exhausted (995 of 1000 used): the job cannot be placed.
    assertEquals(10L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(List.of(MlJobRequirements.of(10L, 1)), Map.of("node_a", MlJobRequirements.of(995L, 2)), nodeMemory, 4, defaultMaxJobs));
    // Enough spare memory/processors spread over three nodes: all five jobs fit.
    assertEquals(0L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(fiveSmallJobs, Map.of("node_a", MlJobRequirements.of(980L, 3), "node_b", MlJobRequirements.of(980L, 1), "node_c", MlJobRequirements.of(970L, 0)), nodeMemory, 4, defaultMaxJobs));
    // Four nodes with varying load: the descending-processor jobs all fit.
    assertEquals(0L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(descendingProcessorJobs, Map.of("node_a", MlJobRequirements.of(900L, 3), "node_b", MlJobRequirements.of(920L, 1), "node_c", MlJobRequirements.of(940L, 0), "node_d", MlJobRequirements.of(960L, 0)), nodeMemory, 4, defaultMaxJobs));
    // Per-node job-count cap of 5 and nodes already holding 5/4 jobs: 30 bytes cannot be placed.
    assertEquals(30L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(descendingProcessorJobs, Map.of("node_a", MlJobRequirements.of(900L, 3), "node_b", MlJobRequirements.of(920L, 1), "node_c", MlJobRequirements.of(940L, 0, 5), "node_d", MlJobRequirements.of(960L, 0, 4)), nodeMemory, 4, 5));
    // Heavily processor-loaded nodes: only one of the five small jobs can be placed.
    assertEquals(40L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(fiveSmallJobs, Map.of("node_a", MlJobRequirements.of(980L, 3), "node_b", MlJobRequirements.of(980L, 4), "node_c", MlJobRequirements.of(970L, 4)), nodeMemory, 4, defaultMaxJobs));
    // One node with spare processors: two of the five small jobs can be placed.
    assertEquals(30L, MlAutoscalingResourceTracker.checkIfJobsCanBeMovedInLeastEfficientWay(fiveSmallJobs, Map.of("node_a", MlJobRequirements.of(980L, 1), "node_b", MlJobRequirements.of(980L, 4), "node_c", MlJobRequirements.of(970L, 4)), nodeMemory, 4, defaultMaxJobs));
}
302896.721122elasticsearch
// Verifies that job assignment respects the limit on concurrently OPENING jobs per node
// (maxConcurrentJobAllocations = 2 in every selectNode call below), and that jobs whose
// persistent task is stale (reassigned with no state) or has a null state still count
// towards that opening limit.
public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() {
    // Three identical ML nodes: 1GB machine memory, 400MB JVM, current ML config version.
    Map<String, String> nodeAttr = Map.of(MachineLearning.MACHINE_MEMORY_NODE_ATTR, "1000000000", MachineLearning.MAX_JVM_SIZE_NODE_ATTR, "400000000", MlConfigVersion.ML_CONFIG_VERSION_NODE_ATTR, MlConfigVersion.CURRENT.toString());
    DiscoveryNodes nodes = DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("_node_name1", "_node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, ROLES_WITH_ML)).add(DiscoveryNodeUtils.create("_node_name2", "_node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), nodeAttr, ROLES_WITH_ML)).add(DiscoveryNodeUtils.create("_node_name3", "_node_id3", new TransportAddress(InetAddress.getLoopbackAddress(), 9302), nodeAttr, ROLES_WITH_ML)).build();
    // Pre-existing opening jobs: two on node 1, two on node 2, one on node 3.
    PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
    OpenJobPersistentTasksExecutorTests.addJobTask("job_id1", "_node_id1", null, tasksBuilder);
    OpenJobPersistentTasksExecutorTests.addJobTask("job_id2", "_node_id1", null, tasksBuilder);
    OpenJobPersistentTasksExecutorTests.addJobTask("job_id3", "_node_id2", null, tasksBuilder);
    OpenJobPersistentTasksExecutorTests.addJobTask("job_id4", "_node_id2", null, tasksBuilder);
    OpenJobPersistentTasksExecutorTests.addJobTask("job_id5", "_node_id3", null, tasksBuilder);
    PersistentTasksCustomMetadata tasks = tasksBuilder.build();
    ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name"));
    csBuilder.nodes(nodes);
    Metadata.Builder metadata = Metadata.builder();
    metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks);
    csBuilder.metadata(metadata);
    Job job6 = BaseMlIntegTestCase.createFareQuoteJob("job_id6", JOB_MEMORY_REQUIREMENT).build(new Date());
    ClusterState cs = csBuilder.build();
    // Node 3 is the only node with fewer than 2 opening jobs, so job 6 must land there.
    JobNodeSelector jobNodeSelector = new JobNodeSelector(cs, shuffled(cs.nodes().getAllNodes()), job6.getId(), MlTasks.JOB_TASK_NAME, memoryTracker, 0, node -> nodeFilter(node, job6));
    PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false);
    assertEquals("_node_id3", result.getExecutorNode());
    // Assign job 6 to node 3: now every node has 2 opening jobs, so job 7 cannot be placed.
    tasksBuilder = PersistentTasksCustomMetadata.builder(tasks);
    OpenJobPersistentTasksExecutorTests.addJobTask(job6.getId(), "_node_id3", null, tasksBuilder);
    tasks = tasksBuilder.build();
    csBuilder = ClusterState.builder(cs);
    csBuilder.metadata(Metadata.builder(cs.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks));
    cs = csBuilder.build();
    Job job7 = BaseMlIntegTestCase.createFareQuoteJob("job_id7", JOB_MEMORY_REQUIREMENT).build(new Date());
    jobNodeSelector = new JobNodeSelector(cs, shuffled(cs.nodes().getAllNodes()), job7.getId(), MlTasks.JOB_TASK_NAME, memoryTracker, 0, node -> nodeFilter(node, job7));
    result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false);
    assertNull("no node selected, because OPENING state", result.getExecutorNode());
    assertTrue(result.getExplanation().contains("Node exceeds [2] the maximum number of jobs [2] in opening state"));
    // Reassign job 6's task with no task state: a stale task must still count as opening.
    tasksBuilder = PersistentTasksCustomMetadata.builder(tasks);
    tasksBuilder.reassignTask(MlTasks.jobTaskId(job6.getId()), new PersistentTasksCustomMetadata.Assignment("_node_id3", "test assignment"));
    tasks = tasksBuilder.build();
    csBuilder = ClusterState.builder(cs);
    csBuilder.metadata(Metadata.builder(cs.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks));
    cs = csBuilder.build();
    jobNodeSelector = new JobNodeSelector(cs, shuffled(cs.nodes().getAllNodes()), job7.getId(), MlTasks.JOB_TASK_NAME, memoryTracker, 0, node -> nodeFilter(node, job7));
    result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false);
    assertNull("no node selected, because stale task", result.getExecutorNode());
    assertTrue(result.getExplanation().contains("Node exceeds [2] the maximum number of jobs [2] in opening state"));
    // Explicitly null out job 6's task state: a null state must also count as opening.
    tasksBuilder = PersistentTasksCustomMetadata.builder(tasks);
    tasksBuilder.updateTaskState(MlTasks.jobTaskId(job6.getId()), null);
    tasks = tasksBuilder.build();
    csBuilder = ClusterState.builder(cs);
    csBuilder.metadata(Metadata.builder(cs.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks));
    cs = csBuilder.build();
    jobNodeSelector = new JobNodeSelector(cs, shuffled(cs.nodes().getAllNodes()), job7.getId(), MlTasks.JOB_TASK_NAME, memoryTracker, 0, node -> nodeFilter(node, job7));
    result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false);
    assertNull("no node selected, because null state", result.getExecutorNode());
    assertTrue(result.getExplanation().contains("Node exceeds [2] the maximum number of jobs [2] in opening state"));
}
301807.4318112elasticsearch
/**
 * Builds a random {@code AutodetectResult} for serialization testing. Every
 * constituent result type is independently present (randomly populated) or
 * absent (null) with probability 1/2, so round-trip tests cover all
 * combinations of optional fields over many iterations.
 */
protected AutodetectResult createTestInstance() {
    final String jobId = "foo";
    final Bucket bucket = randomBoolean() ? new Bucket(jobId, randomDate(), randomNonNegativeLong()) : null;
    List<AnomalyRecord> records = null;
    if (randomBoolean()) {
        final int count = randomInt(10);
        records = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            final AnomalyRecord anomalyRecord = new AnomalyRecord(jobId, randomDate(), randomNonNegativeLong());
            anomalyRecord.setProbability(randomDoubleBetween(0.0, 1.0, true));
            records.add(anomalyRecord);
        }
    }
    List<Influencer> influencers = null;
    if (randomBoolean()) {
        final int count = randomInt(10);
        influencers = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            final Influencer influencer = new Influencer(jobId, randomAlphaOfLength(10), randomAlphaOfLength(10), randomDate(), randomNonNegativeLong());
            influencer.setProbability(randomDoubleBetween(0.0, 1.0, true));
            influencers.add(influencer);
        }
    }
    final Quantiles quantiles = randomBoolean() ? QuantilesTests.createRandomized() : null;
    final ModelSnapshot modelSnapshot = randomBoolean() ? ModelSnapshotTests.createRandomized() : null;
    final ModelSizeStats.Builder modelSizeStats = randomBoolean() ? new ModelSizeStats.Builder(jobId).setModelBytes(randomNonNegativeLong()) : null;
    final ModelPlot modelPlot = randomBoolean() ? new ModelPlot(jobId, randomDate(), randomNonNegativeLong(), randomInt()) : null;
    final Annotation annotation = randomBoolean() ? AnnotationTests.randomAnnotation(jobId) : null;
    final Forecast forecast = randomBoolean() ? new Forecast(jobId, randomAlphaOfLength(20), randomDate(), randomNonNegativeLong(), randomInt()) : null;
    final ForecastRequestStats forecastRequestStats = randomBoolean() ? new ForecastRequestStats(jobId, randomAlphaOfLength(20)) : null;
    CategoryDefinition categoryDefinition = null;
    if (randomBoolean()) {
        categoryDefinition = new CategoryDefinition(jobId);
        categoryDefinition.setCategoryId(randomLong());
    }
    final CategorizerStats.Builder categorizerStats = randomBoolean() ? new CategorizerStats.Builder(jobId).setCategorizedDocCount(randomNonNegativeLong()) : null;
    final FlushAcknowledgement flushAcknowledgement = randomBoolean() ? new FlushAcknowledgement(randomAlphaOfLengthBetween(1, 20), randomInstant(), randomBoolean()) : null;
    // Builders are only materialized here so an absent section stays null end-to-end.
    return new AutodetectResult(
        bucket,
        records,
        influencers,
        quantiles,
        modelSnapshot,
        modelSizeStats == null ? null : modelSizeStats.build(),
        modelPlot,
        annotation,
        forecast,
        forecastRequestStats,
        categoryDefinition,
        categorizerStats == null ? null : categorizerStats.build(),
        flushAcknowledgement
    );
}
302705.6817100elasticsearch
/**
 * Round-trips records through {@link LengthEncodedWriter} and verifies the binary
 * layout: each record is a 4-byte field count followed by, for each field, a 4-byte
 * length and the field's UTF-8 bytes. Covers both the String[] and the List&lt;String&gt;
 * overloads of writeRecord, including an empty field value in the array variant.
 */
public void testLengthEncodedWriter() throws IOException {
    {
        // String[] overload; record1 deliberately contains an empty field.
        String[] header = { "one", "two", "three", "four", "five" };
        String[] record1 = { "r1", "r2", "", "rrr4", "r5" };
        String[] record2 = { "y1", "y2", "yy3", "yyy4", "y5" };
        ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
        LengthEncodedWriter writer = new LengthEncodedWriter(bos);
        writer.writeRecord(header);
        final int NUM_RECORDS = 5;
        for (int i = 0; i < NUM_RECORDS; i++) {
            writer.writeRecord(record1);
            writer.writeRecord(record2);
        }
        ByteBuffer bb = ByteBuffer.wrap(bos.toByteArray());
        assertNextRecord(bb, Arrays.asList(header));
        for (int n = 0; n < NUM_RECORDS; n++) {
            assertNextRecord(bb, Arrays.asList(record1));
            assertNextRecord(bb, Arrays.asList(record2));
        }
    }
    {
        // List<String> overload.
        List<String> header = Arrays.asList("one", "two", "three", "four", "five");
        List<String> record1 = Arrays.asList("r1", "r2", "rr3", "rrr4", "r5");
        List<String> record2 = Arrays.asList("y1", "y2", "yy3", "yyy4", "y5");
        ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
        LengthEncodedWriter writer = new LengthEncodedWriter(bos);
        writer.writeRecord(header);
        final int NUM_RECORDS = 5;
        for (int i = 0; i < NUM_RECORDS; i++) {
            writer.writeRecord(record1);
            writer.writeRecord(record2);
        }
        ByteBuffer bb = ByteBuffer.wrap(bos.toByteArray());
        assertNextRecord(bb, header);
        for (int n = 0; n < NUM_RECORDS; n++) {
            assertNextRecord(bb, record1);
            assertNextRecord(bb, record2);
        }
    }
}

/**
 * Reads the next length-encoded record from {@code bb} and asserts that it matches
 * {@code expected}, field by field.
 */
private static void assertNextRecord(ByteBuffer bb, List<String> expected) {
    int numFields = bb.getInt();
    Assert.assertEquals(expected.size(), numFields);
    for (int i = 0; i < numFields; i++) {
        int fieldLength = bb.getInt();
        byte[] fieldBytes = new byte[fieldLength];
        bb.get(fieldBytes);
        Assert.assertEquals(expected.get(i), new String(fieldBytes, StandardCharsets.UTF_8));
    }
}
301907.362132elasticsearch
/**
 * Builds the mappings for the security tokens index. The top level is dynamic=strict;
 * the refresh_token and access_token sub-objects hold the refresh-token bookkeeping
 * fields and the serialized user token respectively.
 *
 * @return the completed mappings builder
 * @throws UncheckedIOException if the builder fails, which should never happen for an
 *         in-memory JSON builder
 */
private static XContentBuilder getTokenIndexMappings() {
    try {
        final XContentBuilder builder = jsonBuilder();
        builder.startObject();
        {
            builder.startObject("_meta");
            builder.field(SECURITY_VERSION_STRING, BWC_MAPPINGS_VERSION);
            builder.field(SystemIndexDescriptor.VERSION_META_KEY, INTERNAL_TOKENS_INDEX_MAPPINGS_FORMAT);
            builder.endObject();
            builder.field("dynamic", "strict");
            builder.startObject("properties");
            {
                addTypedField(builder, "doc_type", "keyword");
                addDateField(builder, "creation_time");
                builder.startObject("refresh_token");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        addTypedField(builder, "token", "keyword");
                        addTypedField(builder, "refreshed", "boolean");
                        addDateField(builder, "refresh_time");
                        // Tokens superseding this one, encrypted with key material below.
                        builder.startObject("superseding");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                addTypedField(builder, "encrypted_tokens", "binary");
                                addTypedField(builder, "encryption_iv", "binary");
                                addTypedField(builder, "encryption_salt", "binary");
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                        addTypedField(builder, "invalidated", "boolean");
                        // The client (user/realm) that requested the refresh token.
                        builder.startObject("client");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                addTypedField(builder, "type", "keyword");
                                addTypedField(builder, "user", "keyword");
                                addTypedField(builder, "realm", "keyword");
                                defineRealmDomain(builder, "realm_domain");
                                builder.startObject("authentication").field("type", "binary").endObject();
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                    }
                    builder.endObject();
                }
                builder.endObject();
                builder.startObject("access_token");
                {
                    builder.field("type", "object");
                    builder.startObject("properties");
                    {
                        builder.startObject("user_token");
                        {
                            builder.field("type", "object");
                            builder.startObject("properties");
                            {
                                addTypedField(builder, "id", "keyword");
                                addDateField(builder, "expiration_time");
                                addTypedField(builder, "version", "integer");
                                // Opaque metadata object; its contents are deliberately unmapped.
                                builder.startObject("metadata");
                                builder.field("type", "object");
                                builder.field("dynamic", false);
                                builder.endObject();
                                addTypedField(builder, "authentication", "binary");
                            }
                            builder.endObject();
                        }
                        builder.endObject();
                        addTypedField(builder, "invalidated", "boolean");
                        addTypedField(builder, "token", "keyword");
                        addTypedField(builder, "realm", "keyword");
                        defineRealmDomain(builder, "realm_domain");
                    }
                    builder.endObject();
                }
                builder.endObject();
            }
            builder.endObject();
        }
        builder.endObject();
        return builder;
    } catch (IOException e) {
        throw new UncheckedIOException("Failed to build " + TOKENS_INDEX_CONCRETE_NAME + " index mappings", e);
    }
}

/** Emits a leaf property mapping named {@code name} with the given mapping {@code type}. */
private static void addTypedField(XContentBuilder builder, String name, String type) throws IOException {
    builder.startObject(name).field("type", type).endObject();
}

/** Emits a date property mapping named {@code name}, stored as epoch milliseconds. */
private static void addDateField(XContentBuilder builder, String name) throws IOException {
    builder.startObject(name).field("type", "date").field("format", "epoch_millis").endObject();
}
303702.961113elasticsearch
/**
 * Verifies OpenIdConnectAuthenticator.mergeObjects semantics when combining ID token
 * claims with the UserInfo response:
 * <ul>
 *   <li>claims present in either source are retained in the merged object,</li>
 *   <li>a type conflict between the two sources throws IllegalStateException,</li>
 *   <li>on a value conflict the ID token claim wins (UserInfo does not overwrite it),</li>
 *   <li>JSON arrays are merged as a union, and</li>
 *   <li>nested JSON objects are merged recursively.</li>
 * </ul>
 */
public void testJsonObjectMerging() throws Exception {
    final Nonce nonce = new Nonce();
    final String subject = "janedoe";
    final Tuple<Key, JWKSet> keyMaterial = getRandomJwkForType(randomFrom("ES", "RS"));
    final JWK jwk = keyMaterial.v2().getKeys().get(0);
    RelyingPartyConfiguration rpConfig = getRpConfig(jwk.getAlgorithm().getName());
    OpenIdConnectProviderConfiguration opConfig = getOpConfig();
    Map<String, Object> address = new JWTClaimsSet.Builder().claim("street_name", "12, Test St.").claim("locality", "New York").claim("region", "NY").claim("country", "USA").build().toJSONObject();
    Map<String, Object> idTokenObject = new JWTClaimsSet.Builder().jwtID(randomAlphaOfLength(8)).audience(rpConfig.getClientId().getValue()).expirationTime(Date.from(now().plusSeconds(3600))).issuer(opConfig.getIssuer().getValue()).issueTime(Date.from(now().minusSeconds(200))).notBeforeTime(Date.from(now().minusSeconds(200))).claim("nonce", nonce).claim("given_name", "Jane Doe").claim("family_name", "Doe").claim("profile", "https://test-profiles.com/jane.doe").claim("name", "Jane").claim("email", "jane.doe@example.com").claim("roles", new JSONArray().appendElement("role1").appendElement("role2").appendElement("role3")).claim("address", address).subject(subject).build().toJSONObject();
    Map<String, Object> userinfoObject = new JWTClaimsSet.Builder().claim("given_name", "Jane Doe").claim("family_name", "Doe").claim("profile", "https://test-profiles.com/jane.doe").claim("name", "Jane").claim("email", "jane.doe@example.com").subject(subject).build().toJSONObject();
    // A straightforward merge keeps claims from both sources.
    OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userinfoObject);
    assertTrue(idTokenObject.containsKey("given_name"));
    assertTrue(idTokenObject.containsKey("family_name"));
    assertTrue(idTokenObject.containsKey("profile"));
    assertTrue(idTokenObject.containsKey("name"));
    assertTrue(idTokenObject.containsKey("email"));
    assertTrue(idTokenObject.containsKey("address"));
    assertTrue(idTokenObject.containsKey("roles"));
    assertTrue(idTokenObject.containsKey("nonce"));
    assertTrue(idTokenObject.containsKey("sub"));
    assertTrue(idTokenObject.containsKey("jti"));
    assertTrue(idTokenObject.containsKey("aud"));
    assertTrue(idTokenObject.containsKey("exp"));
    assertTrue(idTokenObject.containsKey("iss"));
    assertTrue(idTokenObject.containsKey("iat"));
    // family_name is a Number here but a String in the ID token: type conflict must throw.
    Map<String, Object> wrongTypeInfo = new JWTClaimsSet.Builder().claim("given_name", "Jane Doe").claim("family_name", 123334434).claim("profile", "https://test-profiles.com/jane.doe").claim("name", "Jane").claim("email", "jane.doe@example.com").subject(subject).build().toJSONObject();
    expectThrows(IllegalStateException.class, () -> OpenIdConnectAuthenticator.mergeObjects(idTokenObject, wrongTypeInfo));
    // Conflicting values: the ID token's email and profile must survive the merge.
    Map<String, Object> overwriteUserInfo = new JWTClaimsSet.Builder().claim("given_name", "Jane Doe").claim("family_name", "Doe").claim("profile", "https://test-profiles.com/jane.doe2").claim("name", "Jane").claim("email", "jane.doe@mail.com").subject(subject).build().toJSONObject();
    OpenIdConnectAuthenticator.mergeObjects(idTokenObject, overwriteUserInfo);
    assertThat(idTokenObject.get("email"), equalTo("jane.doe@example.com"));
    assertThat(idTokenObject.get("profile"), equalTo("https://test-profiles.com/jane.doe"));
    // Arrays merge as a union of both sources' elements.
    Map<String, Object> userInfoWithRoles = new JWTClaimsSet.Builder().claim("given_name", "Jane Doe").claim("family_name", "Doe").claim("profile", "https://test-profiles.com/jane.doe").claim("name", "Jane").claim("email", "jane.doe@example.com").claim("roles", new JSONArray().appendElement("role4").appendElement("role5")).subject(subject).build().toJSONObject();
    OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userInfoWithRoles);
    assertThat((JSONArray) idTokenObject.get("roles"), containsInAnyOrder("role1", "role2", "role3", "role4", "role5"));
    // Nested objects merge recursively: the combined address holds keys from both sides.
    Map<String, Object> addressUserInfo = new JWTClaimsSet.Builder().claim("street_name", "12, Test St.").claim("locality", "New York").claim("postal_code", "10024").build().toJSONObject();
    Map<String, Object> userInfoWithAddress = new JWTClaimsSet.Builder().claim("given_name", "Jane Doe").claim("family_name", "Doe").claim("profile", "https://test-profiles.com/jane.doe").claim("name", "Jane").claim("email", "jane.doe@example.com").claim("roles", new JSONArray().appendElement("role4").appendElement("role5")).claim("address", addressUserInfo).subject(subject).build().toJSONObject();
    OpenIdConnectAuthenticator.mergeObjects(idTokenObject, userInfoWithAddress);
    assertTrue(idTokenObject.containsKey("address"));
    @SuppressWarnings("unchecked")
    Map<String, Object> combinedAddress = (Map<String, Object>) idTokenObject.get("address");
    assertTrue(combinedAddress.containsKey("street_name"));
    assertTrue(combinedAddress.containsKey("locality"));
    assertTrue(combinedAddress.containsKey("postal_code"));
    assertTrue(combinedAddress.containsKey("region"));
    assertTrue(combinedAddress.containsKey("country"));
}
302269.417113elasticsearch
// Exercises the failure modes of SamlRealm.buildSigningConfiguration when reading the
// signing credential from a PKCS12 keystore. Randomized scenario matrix:
//  - alias configured but unknown          -> "does not have a key associated with alias"
//  - alias configured, key is EC (not RSA) -> "uses unsupported key algorithm type"
//  - no alias, keystore has no RSA keys    -> "does not contain any RSA key pairs"
//  - no alias, multiple keys present       -> "multiple keys but no alias has been specified"
public void testCreateSigningCredentialFromKeyStoreFailureScenarios() throws Exception {
    assumeFalse("Can't run in a FIPS JVM, PKCS12 keystores are not usable", inFipsJvm());
    final Path dir = createTempDir();
    final Settings.Builder builder = Settings.builder().put(REALM_SETTINGS_PREFIX + ".type", "saml").put("path.home", dir);
    final Path ksFile = dir.resolve("cred.p12");
    // Two RSA pairs and one EC pair; the EC pair is always in the keystore, the RSA
    // pairs only when noRSAKeysInKS is false.
    final Tuple<X509Certificate, PrivateKey> certKeyPair1 = readKeyPair("RSA_4096");
    final Tuple<X509Certificate, PrivateKey> certKeyPair2 = readKeyPair("RSA_2048");
    final Tuple<X509Certificate, PrivateKey> certKeyPair3 = readRandomKeyPair("EC");
    final KeyStore ks = KeyStore.getInstance("PKCS12");
    ks.load(null);
    final boolean noRSAKeysInKS = randomBoolean();
    if (noRSAKeysInKS == false) {
        ks.setKeyEntry(getAliasName(certKeyPair1), certKeyPair1.v2(), "key-password".toCharArray(), new Certificate[] { certKeyPair1.v1() });
        ks.setKeyEntry(getAliasName(certKeyPair2), certKeyPair2.v2(), "key-password".toCharArray(), new Certificate[] { certKeyPair2.v1() });
    }
    ks.setKeyEntry(getAliasName(certKeyPair3), certKeyPair3.v2(), "key-password".toCharArray(), new Certificate[] { certKeyPair3.v1() });
    try (OutputStream out = Files.newOutputStream(ksFile)) {
        ks.store(out, "ks-password".toCharArray());
    }
    builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.path", ksFile.toString());
    builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.type", "PKCS12");
    // Optionally configure an alias: either one that is absent from the keystore
    // (unknownAlias) or the EC pair's alias (unsupported algorithm).
    final boolean isSigningKeyStoreAliasSet = randomBoolean();
    final Tuple<X509Certificate, PrivateKey> chosenAliasCertKeyPair;
    final String unknownAlias = randomAlphaOfLength(5);
    if (isSigningKeyStoreAliasSet) {
        chosenAliasCertKeyPair = randomFrom(Arrays.asList(certKeyPair3, null));
        if (chosenAliasCertKeyPair == null) {
            builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.alias", unknownAlias);
        } else {
            builder.put(REALM_SETTINGS_PREFIX + ".signing.keystore.alias", getAliasName(chosenAliasCertKeyPair));
        }
    } else {
        chosenAliasCertKeyPair = null;
    }
    MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setString(REALM_SETTINGS_PREFIX + ".signing.keystore.secure_password", "ks-password");
    secureSettings.setString(REALM_SETTINGS_PREFIX + ".signing.keystore.secure_key_password", "key-password");
    builder.setSecureSettings(secureSettings);
    final Settings settings = builder.build();
    final RealmConfig realmConfig = realmConfigFromGlobalSettings(settings);
    // Every scenario must fail with an IllegalArgumentException carrying a precise message.
    if (isSigningKeyStoreAliasSet) {
        if (chosenAliasCertKeyPair == null) {
            // Alias points at nothing in the keystore.
            final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig));
            final String expectedErrorMessage = "The configured key store for " + RealmSettings.realmSettingPrefix(realmConfig.identifier()) + "signing." + " does not have a key associated with alias [" + unknownAlias + "] " + "(from setting " + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + ")";
            assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage());
        } else {
            // Alias resolves to the EC key, which is not usable for SAML signing.
            final String chosenAliasName = getAliasName(chosenAliasCertKeyPair);
            final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig));
            final String expectedErrorMessage = "The key associated with alias [" + chosenAliasName + "] " + "(from setting " + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + ") uses unsupported key algorithm type [" + chosenAliasCertKeyPair.v2().getAlgorithm() + "], only RSA is supported";
            assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage());
        }
    } else {
        if (noRSAKeysInKS) {
            // No alias and the keystore holds only the EC pair.
            final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig));
            final String expectedErrorMessage = "The configured key store for " + RealmSettings.realmSettingPrefix(realmConfig.identifier()) + "signing." + " does not contain any RSA key pairs";
            assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage());
        } else {
            // No alias but multiple candidate keys: ambiguous, must be rejected.
            final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> SamlRealm.buildSigningConfiguration(realmConfig));
            final String expectedErrorMessage = "The configured key store for " + RealmSettings.realmSettingPrefix(realmConfig.identifier()) + "signing." + " has multiple keys but no alias has been specified (from setting " + RealmSettings.getFullSettingKey(realmConfig, SamlRealmSettings.SIGNING_KEY_ALIAS) + ")";
            assertEquals(expectedErrorMessage, illegalArgumentException.getLocalizedMessage());
        }
    }
}
303056.135112elasticsearch
/**
 * Verifies which index privileges authorize the mapping-update actions (explicit put-mapping
 * and auto-put-mapping) and that the deprecated leniency — granting put-mapping through the
 * [index] or [write] privilege — emits the expected deprecation warnings.
 * Also checks data-stream backing indices: they are authorized for auto-put-mapping but
 * NOT for the explicit put-mapping action.
 */
public void testAuthorizationForMappingUpdates() {
    final Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build();
    // Two plain indices: "test1" matches only the [index]-privilege group, "test_write1" matches both groups.
    final Metadata.Builder metadata = new Metadata.Builder().put(new IndexMetadata.Builder("test1").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true).put(new IndexMetadata.Builder("test_write1").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true);
    // A data stream "test_write2" with 1-3 backing indices, used to test backing-index authorization.
    int numBackingIndices = randomIntBetween(1, 3);
    List<IndexMetadata> backingIndices = new ArrayList<>();
    for (int backingIndexNumber = 1; backingIndexNumber <= numBackingIndices; backingIndexNumber++) {
        backingIndices.add(createBackingIndexMetadata(DataStream.getDefaultBackingIndexName("test_write2", backingIndexNumber)));
    }
    DataStream ds = DataStreamTestHelper.newInstance("test_write2", backingIndices.stream().map(IndexMetadata::getIndex).collect(Collectors.toList()));
    metadata.put(ds);
    for (IndexMetadata index : backingIndices) {
        metadata.put(index, false);
    }
    SortedMap<String, IndexAbstraction> lookup = metadata.build().getIndicesLookup();
    FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY);
    // Permission under test: [index] on "test*" plus [write] (with a field restriction) on "test_write*".
    IndicesPermission core = new IndicesPermission.Builder(RESTRICTED_INDICES).addGroup(IndexPrivilege.INDEX, FieldPermissions.DEFAULT, null, randomBoolean(), "test*").addGroup(IndexPrivilege.WRITE, new FieldPermissions(fieldPermissionDef(null, new String[] { "denied_field" })), null, randomBoolean(), "test_write*").build();
    // Explicit put-mapping on plain indices: granted via the deprecated leniency, so warnings are expected.
    IndicesAccessControl iac = core.authorize(TransportPutMappingAction.TYPE.name(), Sets.newHashSet("test1", "test_write1"), lookup, fieldPermissionsCache);
    assertThat(iac.isGranted(), is(true));
    assertThat(iac.getIndexPermissions("test1"), is(notNullValue()));
    assertThat(iac.hasIndexPermissions("test1"), is(true));
    assertThat(iac.getIndexPermissions("test_write1"), is(notNullValue()));
    assertThat(iac.hasIndexPermissions("test_write1"), is(true));
    // One warning per (privilege, index) pair that granted the action: [index]/test1, [index]/test_write1, [write]/test_write1.
    assertWarnings("the index privilege [index] allowed the update mapping action [" + TransportPutMappingAction.TYPE.name() + "] on " + "index [test1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges", "the index privilege [index] allowed the update mapping action [" + TransportPutMappingAction.TYPE.name() + "] on " + "index [test_write1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges", "the index privilege [write] allowed the update mapping action [" + TransportPutMappingAction.TYPE.name() + "] on " + "index [test_write1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges");
    // Auto-put-mapping on plain indices: granted; only the [index]-privilege grant on test1 is deprecated here.
    iac = core.authorize(TransportAutoPutMappingAction.TYPE.name(), Sets.newHashSet("test1", "test_write1"), lookup, fieldPermissionsCache);
    assertThat(iac.isGranted(), is(true));
    assertThat(iac.getIndexPermissions("test1"), is(notNullValue()));
    assertThat(iac.hasIndexPermissions("test1"), is(true));
    assertThat(iac.getIndexPermissions("test_write1"), is(notNullValue()));
    assertThat(iac.hasIndexPermissions("test_write1"), is(true));
    assertWarnings("the index privilege [index] allowed the update mapping action [" + TransportAutoPutMappingAction.TYPE.name() + "] on " + "index [test1], this privilege will not permit mapping updates in the next major release - " + "users who require access to update mappings must be granted explicit privileges");
    // Auto-put-mapping addressed at the data stream name itself: granted.
    iac = core.authorize(TransportAutoPutMappingAction.TYPE.name(), Sets.newHashSet("test_write2"), lookup, fieldPermissionsCache);
    assertThat(iac.isGranted(), is(true));
    assertThat(iac.getIndexPermissions("test_write2"), is(notNullValue()));
    assertThat(iac.hasIndexPermissions("test_write2"), is(true));
    // Explicit put-mapping on the data stream: denied (no permissions resolved for it).
    iac = core.authorize(TransportPutMappingAction.TYPE.name(), Sets.newHashSet("test_write2"), lookup, fieldPermissionsCache);
    assertThat(iac.getIndexPermissions("test_write2"), is(nullValue()));
    assertThat(iac.hasIndexPermissions("test_write2"), is(false));
    // Auto-put-mapping on each backing index: granted for every one of them.
    iac = core.authorize(TransportAutoPutMappingAction.TYPE.name(), Sets.newHashSet(backingIndices.stream().map(im -> im.getIndex().getName()).collect(Collectors.toList())), lookup, fieldPermissionsCache);
    assertThat(iac.isGranted(), is(true));
    for (IndexMetadata im : backingIndices) {
        assertThat(iac.getIndexPermissions(im.getIndex().getName()), is(notNullValue()));
        assertThat(iac.hasIndexPermissions(im.getIndex().getName()), is(true));
    }
    // Explicit put-mapping on backing indices: denied for every one of them.
    iac = core.authorize(TransportPutMappingAction.TYPE.name(), Sets.newHashSet(backingIndices.stream().map(im -> im.getIndex().getName()).collect(Collectors.toList())), lookup, fieldPermissionsCache);
    assertThat(iac.isGranted(), is(false));
    for (IndexMetadata im : backingIndices) {
        assertThat(iac.getIndexPermissions(im.getIndex().getName()), is(nullValue()));
        assertThat(iac.hasIndexPermissions(im.getIndex().getName()), is(false));
    }
}
302367.146111elasticsearch
/**
 * Exercises SLM retention running concurrently with an in-progress snapshot of the same policy:
 * a first snapshot completes under a zero-duration retention window, a second snapshot is then
 * started and blocked on the data nodes, retention is triggered while it is blocked, and the test
 * verifies that the completed snapshot is deleted, the in-progress one can be cancelled, and both
 * outcomes are recorded in the SLM history index.
 */
public void testRetentionWhileSnapshotInProgress() throws Exception {
    final String indexName = "test";
    final String policyId = "slm-policy";
    int docCount = 20;
    for (int i = 0; i < docCount; i++) {
        index(indexName, null, Collections.singletonMap("foo", "bar"));
    }
    // "mock" repository type allows block/unblock control of snapshot I/O on the data nodes.
    createRepository(REPO, "mock");
    logger.info("--> creating policy {}", policyId);
    // Retention window of 0 seconds means any completed snapshot is immediately eligible for deletion.
    createSnapshotPolicy(policyId, "snap", NEVER_EXECUTE_CRON_SCHEDULE, REPO, indexName, true, false, new SnapshotRetentionConfiguration(TimeValue.timeValueSeconds(0), null, null));
    final String completedSnapshotName = executePolicy(policyId);
    logger.info("--> kicked off snapshot {}", completedSnapshotName);
    // Wait for the first snapshot to finish successfully before setting up the concurrent scenario.
    assertBusy(() -> {
        try {
            SnapshotsStatusResponse s = getSnapshotStatus(completedSnapshotName);
            assertThat("expected a snapshot but none were returned", s.getSnapshots().size(), equalTo(1));
            SnapshotStatus status = s.getSnapshots().get(0);
            logger.info("--> waiting for snapshot {} to be completed, got: {}", completedSnapshotName, status.getState());
            assertThat(status.getState(), equalTo(SnapshotsInProgress.State.SUCCESS));
        } catch (SnapshotMissingException e) {
            logger.error("expected a snapshot but it was missing", e);
            fail("expected a snapshot with name " + completedSnapshotName + " but it does not exist");
        }
    });
    awaitNoMoreRunningOperations(randomFrom(dataNodeNames));
    logger.info("--> indexing more docs to force new segment files");
    // New docs + refresh create new segment files so the second snapshot actually has data to copy
    // (and therefore something to block on).
    for (int i = 0; i < docCount; i++) {
        index(indexName, null, Collections.singletonMap("foo", "bar"));
    }
    refresh(indexName);
    try {
        logger.info("--> blocking data nodes from completing snapshot");
        blockAllDataNodes(REPO);
        logger.info("--> blocked data nodes, executing policy");
        final String secondSnapName = executePolicy(policyId);
        logger.info("--> executed policy, got snapname [{}]", secondSnapName);
        logger.info("--> Waiting for at least one data node to hit the block");
        waitForBlockOnAnyDataNode(REPO);
        // Confirm SLM reports the second snapshot as in-progress while the data nodes are blocked.
        assertBusy(() -> {
            logger.info("--> at least one data node has hit the block");
            GetSnapshotLifecycleAction.Response getResp = client().execute(GetSnapshotLifecycleAction.INSTANCE, new GetSnapshotLifecycleAction.Request(policyId)).get();
            logger.info("--> checking for in progress snapshot...");
            assertThat(getResp.getPolicies().size(), greaterThan(0));
            SnapshotLifecyclePolicyItem item = getResp.getPolicies().get(0);
            assertNotNull(item.getSnapshotInProgress());
            SnapshotLifecyclePolicyItem.SnapshotInProgress inProgress = item.getSnapshotInProgress();
            assertThat(inProgress.getSnapshotId().getName(), equalTo(secondSnapName));
            assertThat(inProgress.getStartTime(), greaterThan(0L));
            assertThat(inProgress.getState(), anyOf(equalTo(SnapshotsInProgress.State.INIT), equalTo(SnapshotsInProgress.State.STARTED)));
            assertNull(inProgress.getFailure());
        }, 60, TimeUnit.SECONDS);
        // Run retention while the second snapshot is still blocked mid-flight.
        logger.info("--> triggering retention");
        assertTrue(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get().isAcknowledged());
        logger.info("--> unblocking snapshots");
        unblockNode(REPO, internalCluster().getMasterName());
        unblockAllDataNodes(REPO);
        // Retention should delete the first (completed, expired) snapshot.
        assertBusy(() -> {
            clusterAdmin().prepareReroute().get();
            logger.info("--> waiting for snapshot to be deleted");
            try {
                SnapshotsStatusResponse s = getSnapshotStatus(completedSnapshotName);
                assertNull("expected no snapshot but one was returned", s.getSnapshots().get(0));
            } catch (SnapshotMissingException e) {
                // Expected: the snapshot has been deleted by retention, which is the success condition
                // of this assertBusy iteration — intentionally swallowed.
            }
        });
        // The second snapshot must be cancellable; retry until no concurrent snapshot/delete blocks it.
        assertBusy(() -> {
            try {
                logger.info("--> cancelling snapshot {}", secondSnapName);
                clusterAdmin().prepareDeleteSnapshot(REPO, secondSnapName).get();
            } catch (ConcurrentSnapshotExecutionException e) {
                logger.info("--> attempted to stop second snapshot", e);
                fail("attempted to stop second snapshot but a snapshot or delete was in progress");
            }
        });
        // Two history documents expected for the completed snapshot: one for creation, one for deletion.
        assertBusy(() -> {
            assertResponse(prepareSearch(".slm-history*").setQuery(QueryBuilders.matchQuery("snapshot_name", completedSnapshotName)), resp -> {
                logger.info("--> checking history written for {}, got: {}", completedSnapshotName, Strings.arrayToCommaDelimitedString(resp.getHits().getHits()));
                assertThat(resp.getHits().getTotalHits().value, equalTo(2L));
            });
        });
    } finally {
        // Always unblock so a failing test does not leave the cluster wedged for subsequent tests.
        unblockNode(REPO, internalCluster().getMasterName());
        unblockAllDataNodes(REPO);
    }
}
301791.731131elasticsearch
/**
 * The SLM health indicator must report YELLOW when policies keep failing past the warning
 * threshold. Three unhealthy policy shapes are covered in one cluster state:
 * a policy with a prior success, a policy that has never succeeded, and a policy whose last
 * success has no recorded start time. All three must appear in the details, the impact, and
 * the diagnosis of the resulting {@code HealthIndicatorResult}.
 */
public void testIsYellowWhenPoliciesHaveFailedForMoreThanWarningThreshold() {
    long execTime = System.currentTimeMillis();
    // Place the last failure just beyond the 24h warning window (5s of slack) so the
    // "failing for too long" condition triggers for every policy below.
    long window = TimeUnit.HOURS.toMillis(24) + 5000L;
    // At least 5 failed invocations each — above the repeated-failure threshold.
    long failedInvocations1 = randomLongBetween(5L, Long.MAX_VALUE);
    long failedInvocations2 = randomLongBetween(5L, Long.MAX_VALUE);
    long failedInvocations3 = randomLongBetween(5L, Long.MAX_VALUE);
    var clusterState = createClusterStateWith(new SnapshotLifecycleMetadata(Map.of("test-policy", SnapshotLifecyclePolicyMetadata.builder().setPolicy(new SnapshotLifecyclePolicy("test-policy", "<test-policy-{now/d}>", "", "test-repository", null, null)).setVersion(1L).setModifiedDate(System.currentTimeMillis()).setLastSuccess(snapshotInvocation(execTime, execTime + 1000L)).setLastFailure(snapshotInvocation(null, execTime + window + 1000L)).setInvocationsSinceLastSuccess(failedInvocations1).build(), "test-policy-without-any-success", SnapshotLifecyclePolicyMetadata.builder().setPolicy(new SnapshotLifecyclePolicy("test-policy-without-any-success", "<test-policy-{now/d}>", "", "test-repository", null, null)).setVersion(1L).setModifiedDate(System.currentTimeMillis()).setLastSuccess(null).setLastFailure(snapshotInvocation(null, execTime + window + 1000L)).setInvocationsSinceLastSuccess(failedInvocations2).build(), "test-policy-without-success-start-time", SnapshotLifecyclePolicyMetadata.builder().setPolicy(new SnapshotLifecyclePolicy("test-policy-without-success-start-time", "<test-policy-{now/d}>", "", "test-repository", null, null)).setVersion(1L).setModifiedDate(System.currentTimeMillis()).setLastSuccess(snapshotInvocation(null, execTime)).setLastFailure(snapshotInvocation(null, execTime + window + 1000L)).setInvocationsSinceLastSuccess(failedInvocations3).build()), RUNNING, null));
    var service = createSlmHealthIndicatorService(clusterState);
    HealthIndicatorResult calculate = service.calculate(true, HealthInfo.EMPTY_HEALTH_INFO);
    // Full structural equality on the result: status YELLOW, all 3 policies flagged unhealthy,
    // per-policy failure counts in the details, and a diagnosis listing each policy (only the
    // first one includes a "since [timestamp]" clause, as it is the only one with a success start time).
    assertThat(calculate, equalTo(new HealthIndicatorResult(NAME, YELLOW, "Encountered [3] unhealthy snapshot lifecycle management policies.", new SimpleHealthIndicatorDetails(Map.of("slm_status", RUNNING, "policies", 3, "unhealthy_policies", Map.of("count", 3, "invocations_since_last_success", Map.of("test-policy", failedInvocations1, "test-policy-without-any-success", failedInvocations2, "test-policy-without-success-start-time", failedInvocations3)))), Collections.singletonList(new HealthIndicatorImpact(NAME, SlmHealthIndicatorService.STALE_SNAPSHOTS_IMPACT_ID, 2, "Some automated snapshots have not had a successful execution recently. Indices restored from affected " + "snapshots may not contain recent changes.", List.of(ImpactArea.BACKUP))), List.of(new Diagnosis(SlmHealthIndicatorService.checkRecentlyFailedSnapshots("Several automated snapshot policies are unhealthy:\n" + "- [test-policy] had [" + failedInvocations1 + "] repeated failures without successful execution since [" + FORMATTER.formatMillis(execTime) + "]\n" + "- [test-policy-without-any-success] had [" + failedInvocations2 + "] repeated failures without successful execution\n" + "- [test-policy-without-success-start-time] had [" + failedInvocations3 + "] repeated failures without successful execution", "Check the snapshot lifecycle policies for detailed failure info:\n" + "- GET /_slm/policy/test-policy?human\n" + "- GET /_slm/policy/test-policy-without-any-success?human\n" + "- GET /_slm/policy/test-policy-without-success-start-time?human"), List.of(new Diagnosis.Resource(Type.SLM_POLICY, List.of("test-policy", "test-policy-without-any-success", "test-policy-without-success-start-time"))))))));
}
303176.71108elasticsearch
/**
 * Verifies that every write/update method of the read-only JDBC {@code ResultSet}
 * implementation throws the "writes not supported" exception. Each method is exercised
 * in all of its overloads: by column index and by column label, and — for the stream
 * setters — with both the {@code int}- and {@code long}-length variants.
 *
 * Fixes over the previous version: the {@code updateAsciiStream(int, InputStream, long)}
 * and {@code updateBinaryStream(int, InputStream, long)} overloads were never tested
 * (their lines were accidental duplicates of the int-length overloads), and
 * {@code updateRow()} was asserted twice.
 */
public void testUnsupportedUpdateMethods() throws IOException, SQLException {
    index("test", "1", builder -> builder.field("test", "test"));
    try (Connection conn = esJdbc();
        PreparedStatement statement = conn.prepareStatement(SELECT_WILDCARD);
        ResultSet r = statement.executeQuery()) {
        r.next();
        // Typed nulls so the compiler picks the intended overload for the Blob/Clob/NClob/stream setters.
        Blob b = null;
        InputStream i = null;
        Clob c = null;
        NClob nc = null;
        Reader rd = null;
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBytes("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateArray("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1));
        // was a duplicate of the int-length line above; now covers the long-length overload
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream(1, null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateAsciiStream("", null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBigDecimal("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1));
        // was a duplicate of the int-length line above; now covers the long-length overload
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream(1, null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBinaryStream("", null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, b));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, i));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", b));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", i));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob(1, null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBlob("", null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean(1, false));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateBoolean("", false));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte(1, (byte) 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateByte("", (byte) 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream(1, null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateCharacterStream("", null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, c));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, rd));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", c));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", rd));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob(1, null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateClob("", null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateDate("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble(1, 0d));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateDouble("", 0d));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat(1, 0f));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateFloat("", 0f));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt(1, 0));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateInt("", 0));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong(1, 0L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateLong("", 0L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream(1, null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNCharacterStream("", null, 1L));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, nc));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, rd));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", nc));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", rd));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob(1, null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNClob("", null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNString("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull(1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateNull(""));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject(1, null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateObject("", null, 1));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateRef("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateRowId("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateSQLXML("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort(1, (short) 0));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateShort("", (short) 0));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateString(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateString("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateTime("", null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp(1, null));
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateTimestamp("", null));
        // Row-level mutation and positioning methods (updateRow() is covered once, here).
        assertThrowsWritesUnsupportedForUpdate(() -> r.insertRow());
        assertThrowsWritesUnsupportedForUpdate(() -> r.updateRow());
        assertThrowsWritesUnsupportedForUpdate(() -> r.deleteRow());
        assertThrowsWritesUnsupportedForUpdate(() -> r.cancelRowUpdates());
        assertThrowsWritesUnsupportedForUpdate(() -> r.moveToInsertRow());
        assertThrowsWritesUnsupportedForUpdate(() -> r.refreshRow());
        assertThrowsWritesUnsupportedForUpdate(() -> r.moveToCurrentRow());
        assertThrowsWritesUnsupportedForUpdate(() -> r.rowUpdated());
        assertThrowsWritesUnsupportedForUpdate(() -> r.rowInserted());
        assertThrowsWritesUnsupportedForUpdate(() -> r.rowDeleted());
    }
}
308968.72183elasticsearch
/**
 * Edge-case coverage for the SQL DATEDIFF function: each date-part is exercised with its
 * full and abbreviated unit names, in both argument orders (expecting negated results),
 * and both in UTC and in a fixed-offset zone (Etc/GMT-10, i.e. UTC+10) where the offset
 * can shift a boundary and change the count by one.
 */
public void testDiffEdgeCases() {
    // NOTE: "Etc/GMT-10" is UTC+10 (the Etc/GMT sign is inverted by convention).
    ZoneId zoneId = ZoneId.of("Etc/GMT-10");
    Literal dt1 = l(dateTime(2010, 12, 31, 18, 0, 0, 0));
    Literal dt2 = l(dateTime(2019, 1, 1, 18, 0, 0, 0));
    // Year/quarter/month units: the +10h zone pushes dt1 over a year boundary, lowering the counts by one.
    assertEquals(9, new DateDiff(Source.EMPTY, l("years"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-9, new DateDiff(Source.EMPTY, l("year"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(8, new DateDiff(Source.EMPTY, l("yyyy"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-8, new DateDiff(Source.EMPTY, l("yy"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    assertEquals(33, new DateDiff(Source.EMPTY, l("quarter"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-33, new DateDiff(Source.EMPTY, l("qq"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(32, new DateDiff(Source.EMPTY, l("quarter"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-32, new DateDiff(Source.EMPTY, l("qq"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    assertEquals(97, new DateDiff(Source.EMPTY, l("month"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-97, new DateDiff(Source.EMPTY, l("months"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(96, new DateDiff(Source.EMPTY, l("mm"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-96, new DateDiff(Source.EMPTY, l("m"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Week unit: multi-year span.
    dt1 = l(dateTime(1976, 9, 9, 0, 0, 0, 0));
    dt2 = l(dateTime(1983, 5, 22, 0, 0, 0, 0));
    assertEquals(350, new DateDiff(Source.EMPTY, l("week"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-350, new DateDiff(Source.EMPTY, l("weeks"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(350, new DateDiff(Source.EMPTY, l("wk"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-350, new DateDiff(Source.EMPTY, l("ww"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Week unit: dates a few days apart within the same week — difference is zero in both orders.
    dt1 = l(dateTime(1988, 1, 2, 0, 0, 0, 0));
    dt2 = l(dateTime(1987, 12, 29, 0, 0, 0, 0));
    assertEquals(0, new DateDiff(Source.EMPTY, l("week"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(0, new DateDiff(Source.EMPTY, l("weeks"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(0, new DateDiff(Source.EMPTY, l("wk"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(0, new DateDiff(Source.EMPTY, l("ww"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Week unit: forward span across several years.
    dt1 = l(dateTime(1988, 1, 5, 0, 0, 0, 0));
    dt2 = l(dateTime(1996, 5, 13, 0, 0, 0, 0));
    assertEquals(436, new DateDiff(Source.EMPTY, l("week"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-436, new DateDiff(Source.EMPTY, l("weeks"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(436, new DateDiff(Source.EMPTY, l("wk"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-436, new DateDiff(Source.EMPTY, l("ww"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Week unit: backwards span (dt2 before dt1), expecting a negative count.
    dt1 = l(dateTime(1999, 8, 20, 0, 0, 0, 0));
    dt2 = l(dateTime(1974, 3, 17, 0, 0, 0, 0));
    assertEquals(-1326, new DateDiff(Source.EMPTY, l("week"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(1326, new DateDiff(Source.EMPTY, l("weeks"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(-1326, new DateDiff(Source.EMPTY, l("wk"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(1326, new DateDiff(Source.EMPTY, l("ww"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Week unit: span within a single year.
    dt1 = l(dateTime(1997, 2, 2, 0, 0, 0, 0));
    dt2 = l(dateTime(1997, 9, 19, 0, 0, 0, 0));
    assertEquals(32, new DateDiff(Source.EMPTY, l("week"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-32, new DateDiff(Source.EMPTY, l("weeks"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(32, new DateDiff(Source.EMPTY, l("wk"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-32, new DateDiff(Source.EMPTY, l("ww"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Week unit: backwards span of about a year and a half.
    dt1 = l(dateTime(1980, 11, 7, 0, 0, 0, 0));
    dt2 = l(dateTime(1979, 4, 1, 0, 0, 0, 0));
    assertEquals(-83, new DateDiff(Source.EMPTY, l("week"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(83, new DateDiff(Source.EMPTY, l("weeks"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(-83, new DateDiff(Source.EMPTY, l("wk"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(83, new DateDiff(Source.EMPTY, l("ww"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Hour unit: endpoint partway through an hour.
    dt1 = l(dateTime(1997, 9, 19, 0, 0, 0, 0));
    dt2 = l(dateTime(2004, 8, 2, 7, 59, 23, 0));
    assertEquals(60223, new DateDiff(Source.EMPTY, l("hour"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-60223, new DateDiff(Source.EMPTY, l("hours"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(60223, new DateDiff(Source.EMPTY, l("hh"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-60223, new DateDiff(Source.EMPTY, l("hh"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Hour unit: endpoint one nanosecond before the next hour — count must not round up.
    dt1 = l(dateTime(1997, 9, 19, 0, 0, 0, 0));
    dt2 = l(dateTime(2004, 8, 2, 7, 59, 59, 999999999));
    assertEquals(60223, new DateDiff(Source.EMPTY, l("hour"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-60223, new DateDiff(Source.EMPTY, l("hours"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(60223, new DateDiff(Source.EMPTY, l("hh"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-60223, new DateDiff(Source.EMPTY, l("hh"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Minute unit: endpoint partway through a minute.
    dt1 = l(dateTime(2002, 4, 27, 0, 0, 0, 0));
    dt2 = l(dateTime(2004, 7, 28, 12, 34, 28, 0));
    assertEquals(1185874, new DateDiff(Source.EMPTY, l("minute"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-1185874, new DateDiff(Source.EMPTY, l("minutes"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(1185874, new DateDiff(Source.EMPTY, l("mi"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-1185874, new DateDiff(Source.EMPTY, l("n"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Minute unit: near-decade span.
    dt1 = l(dateTime(1995, 9, 3, 0, 0, 0, 0));
    dt2 = l(dateTime(2004, 7, 26, 12, 30, 34, 0));
    assertEquals(4679310, new DateDiff(Source.EMPTY, l("minute"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-4679310, new DateDiff(Source.EMPTY, l("minutes"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(4679310, new DateDiff(Source.EMPTY, l("mi"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-4679310, new DateDiff(Source.EMPTY, l("n"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
    // Minute unit: endpoint one nanosecond before the next minute — count must not round up.
    dt1 = l(dateTime(1997, 5, 30, 0, 0, 0, 0));
    dt2 = l(dateTime(2004, 7, 28, 23, 30, 59, 999999999));
    assertEquals(3768450, new DateDiff(Source.EMPTY, l("minute"), dt1, dt2, UTC).makePipe().asProcessor().process(null));
    assertEquals(-3768450, new DateDiff(Source.EMPTY, l("minutes"), dt2, dt1, UTC).makePipe().asProcessor().process(null));
    assertEquals(3768450, new DateDiff(Source.EMPTY, l("mi"), dt1, dt2, zoneId).makePipe().asProcessor().process(null));
    assertEquals(-3768450, new DateDiff(Source.EMPTY, l("n"), dt2, dt1, zoneId).makePipe().asProcessor().process(null));
}
302928.711594elasticsearch
/**
 * Translates a {@link VectorTileRequest} into the search that produces a vector tile:
 * top hits with the geometry fetched pre-encoded as MVT, an optional geo-grid
 * aggregation with per-cell metric pipelines, an optional exact-bounds aggregation,
 * and the request's sort clauses.
 *
 * @param client  cancellable client used to create the request builder
 * @param request the parsed vector-tile request
 * @return the fully configured search request builder
 * @throws IOException if building the request fails
 */
private static SearchRequestBuilder searchRequestBuilder(RestCancellableNodeClient client, VectorTileRequest request) throws IOException {
    final SearchRequestBuilder searchRequestBuilder = client.prepareSearch(request.getIndexes());
    searchRequestBuilder.setSize(request.getSize());
    searchRequestBuilder.setFetchSource(false);
    searchRequestBuilder.setTrackTotalHitsUpTo(request.getTrackTotalHitsUpTo());
    for (FieldAndFormat field : request.getFieldAndFormats()) {
        searchRequestBuilder.addFetchField(field);
    }
    // Fetch the geometry field already encoded as MVT for this tile (z/x/y@extent:buffer).
    String args = request.getZ() + "/" + request.getX() + "/" + request.getY() + "@" + request.getExtent() + ":" + request.getBuffer();
    searchRequestBuilder.addFetchField(new FieldAndFormat(request.getField(), "mvt(" + args + ")"));
    Map<String, Object> runtimeMappings = request.getRuntimeMappings();
    if (request.getWithLabels()) {
        // Add a runtime geo_point field exposing each geometry's label position.
        // Copy into a fresh map so the request's own mappings are never mutated;
        // copying an empty map is a harmless no-op, so no size check is needed.
        final Map<String, Object> mappings = new HashMap<>(runtimeMappings);
        final Map<String, Object> labelsMap = new HashMap<>();
        labelsMap.put("type", "geo_point");
        labelsMap.put("script", "GeoPoint point = doc['" + request.getField() + "'].getLabelPosition(); emit(point.getLat(), point.getLon());");
        mappings.put(LABEL_POSITION_FIELD_NAME, labelsMap);
        searchRequestBuilder.addFetchField(LABEL_POSITION_FIELD_NAME);
        runtimeMappings = mappings;
    }
    searchRequestBuilder.setRuntimeMappings(runtimeMappings);
    // Restrict hits to the (buffered) tile envelope; AND it with the user query if present.
    final Rectangle boxFilter = request.getGridAgg().bufferTile(request.getBoundingBox(), request.getZ(), request.getGridPrecision());
    QueryBuilder qBuilder = QueryBuilders.geoShapeQuery(request.getField(), boxFilter);
    if (request.getQueryBuilder() != null) {
        final BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
        boolQueryBuilder.filter(request.getQueryBuilder());
        boolQueryBuilder.filter(qBuilder);
        qBuilder = boolQueryBuilder;
    }
    searchRequestBuilder.setQuery(qBuilder);
    if (request.getGridPrecision() > 0) {
        // Grid aggregation over the tile; bound it only when the agg type needs it.
        final GeoBoundingBox boundingBox;
        if (request.getGridAgg().needsBounding(request.getZ(), request.getGridPrecision())) {
            final Rectangle rectangle = request.getBoundingBox();
            boundingBox = new GeoBoundingBox(new GeoPoint(rectangle.getMaxLat(), rectangle.getMinLon()), new GeoPoint(rectangle.getMinLat(), rectangle.getMaxLon()));
        } else {
            // NaN corners act as an "unbounded" bounding box for the aggregation.
            boundingBox = new GeoBoundingBox(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN));
        }
        final GeoGridAggregationBuilder tileAggBuilder = request.getGridAgg().newAgg(GRID_FIELD).field(request.getField()).precision(request.getGridAgg().gridPrecisionToAggPrecision(request.getZ(), request.getGridPrecision())).setGeoBoundingBox(boundingBox).size(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS);
        searchRequestBuilder.addAggregation(tileAggBuilder);
        searchRequestBuilder.addAggregation(new StatsBucketPipelineAggregationBuilder(COUNT_TAG, GRID_FIELD + "." + COUNT_TAG));
        if (request.getGridType() == GridType.CENTROID) {
            tileAggBuilder.subAggregation(new GeoCentroidAggregationBuilder(CENTROID_AGG_NAME).field(request.getField()));
        }
        final List<MetricsAggregationBuilder<?>> aggregations = request.getAggBuilder();
        for (MetricsAggregationBuilder<?> aggregation : aggregations) {
            // Internal names are reserved so user aggs cannot clash with the tile's own aggs.
            if (aggregation.getName().startsWith(INTERNAL_AGG_PREFIX)) {
                throw new IllegalArgumentException("Invalid aggregation name [" + aggregation.getName() + "]. Aggregation names cannot start with prefix '" + INTERNAL_AGG_PREFIX + "'");
            }
            tileAggBuilder.subAggregation(aggregation);
            final Set<String> metricNames = aggregation.metricNames();
            for (String metric : metricNames) {
                final String bucketPath;
                // Dotted metric names require the bracketed bucket-path syntax.
                if (metric.contains(".")) {
                    bucketPath = GRID_FIELD + ">" + aggregation.getName() + "[" + metric + "]";
                } else {
                    bucketPath = GRID_FIELD + ">" + aggregation.getName() + "." + metric;
                }
                // Single-metric aggs keep their plain name; multi-metric ones get "name.metric".
                final String aggName = metricNames.size() == 1 ? aggregation.getName() : aggregation.getName() + "." + metric;
                searchRequestBuilder.addAggregation(new StatsBucketPipelineAggregationBuilder(aggName, bucketPath));
            }
        }
    }
    if (request.getExactBounds()) {
        final GeoBoundsAggregationBuilder boundsBuilder = new GeoBoundsAggregationBuilder(BOUNDS_FIELD).field(request.getField()).wrapLongitude(false);
        searchRequestBuilder.addAggregation(boundsBuilder);
    }
    for (SortBuilder<?> sortBuilder : request.getSortBuilders()) {
        searchRequestBuilder.addSort(sortBuilder);
    }
    return searchRequestBuilder;
}
303079.521695elasticsearch
/**
 * Serializes a randomized {@link IncidentEvent} to the PagerDuty v2 API JSON
 * and verifies every field round-trips through the generated document.
 */
public void testPagerDutyXContent() throws IOException {
    String serviceKey = randomAlphaOfLength(3);
    boolean attachPayload = randomBoolean();
    Payload payload = null;
    if (attachPayload) {
        payload = new Payload.Simple(Collections.singletonMap(randomAlphaOfLength(3), randomAlphaOfLength(3)));
    }
    String watchId = randomAlphaOfLength(3);
    String description = randomAlphaOfLength(3);
    String eventType = randomAlphaOfLength(3);
    // Optional fields are occasionally null to exercise the "absent" paths.
    String incidentKey = rarely() ? null : randomAlphaOfLength(3);
    String client = rarely() ? null : randomAlphaOfLength(3);
    String clientUrl = rarely() ? null : randomAlphaOfLength(3);
    String account = rarely() ? null : randomAlphaOfLength(3);
    IncidentEventContext[] contexts = null;
    List<IncidentEventContext> links = new ArrayList<>();
    List<IncidentEventContext> images = new ArrayList<>();
    if (randomBoolean()) {
        // Random mix of link and image contexts, tracked separately for later checks.
        int numContexts = randomIntBetween(0, 3);
        contexts = new IncidentEventContext[numContexts];
        for (int i = 0; i < numContexts; i++) {
            if (randomBoolean()) {
                contexts[i] = IncidentEventContext.link("href", "text");
                links.add(contexts[i]);
            } else {
                contexts[i] = IncidentEventContext.image("src", "href", "alt");
                images.add(contexts[i]);
            }
        }
    }
    HttpProxy proxy = rarely() ? null : HttpProxy.NO_PROXY;
    IncidentEvent event = new IncidentEvent(description, eventType, incidentKey, client, clientUrl, account, attachPayload, contexts, proxy);
    XContentBuilder jsonBuilder = jsonBuilder();
    jsonBuilder.startObject();
    event.buildAPIXContent(jsonBuilder, ToXContent.EMPTY_PARAMS, serviceKey, payload, watchId);
    jsonBuilder.endObject();
    // Sanity-parse the generated document; close the parser instead of leaking it.
    try (XContentParser parser = createParser(jsonBuilder)) {
        parser.nextToken();
    }
    ObjectPath objectPath = ObjectPath.createFromXContent(jsonBuilder.contentType().xContent(), BytesReference.bytes(jsonBuilder));
    String actualServiceKey = objectPath.evaluate(IncidentEvent.Fields.ROUTING_KEY.getPreferredName());
    String actualWatchId = objectPath.evaluate(IncidentEvent.Fields.PAYLOAD.getPreferredName() + "." + IncidentEvent.Fields.SOURCE.getPreferredName());
    if (actualWatchId == null) {
        // NOTE(review): looks like the source field falls back to "watcher" — confirm against IncidentEvent.
        actualWatchId = "watcher";
    }
    String actualDescription = objectPath.evaluate(IncidentEvent.Fields.PAYLOAD.getPreferredName() + "." + IncidentEvent.Fields.SUMMARY.getPreferredName());
    String actualEventType = objectPath.evaluate(IncidentEvent.Fields.EVENT_ACTION.getPreferredName());
    String actualIncidentKey = objectPath.evaluate(IncidentEvent.Fields.DEDUP_KEY.getPreferredName());
    String actualClient = objectPath.evaluate(IncidentEvent.Fields.CLIENT.getPreferredName());
    String actualClientUrl = objectPath.evaluate(IncidentEvent.Fields.CLIENT_URL.getPreferredName());
    String actualSeverity = objectPath.evaluate(IncidentEvent.Fields.PAYLOAD.getPreferredName() + "." + IncidentEvent.Fields.SEVERITY.getPreferredName());
    Map<String, Object> payloadDetails = objectPath.evaluate("payload.custom_details.payload");
    Payload actualPayload = null;
    if (payloadDetails != null) {
        actualPayload = new Payload.Simple(payloadDetails);
    }
    List<IncidentEventContext> actualLinks = new ArrayList<>();
    List<Map<String, String>> linkMap = objectPath.evaluate(IncidentEvent.Fields.LINKS.getPreferredName());
    if (linkMap != null) {
        for (Map<String, String> iecValue : linkMap) {
            actualLinks.add(IncidentEventContext.link(iecValue.get("href"), iecValue.get("text")));
        }
    }
    List<IncidentEventContext> actualImages = new ArrayList<>();
    List<Map<String, String>> imgMap = objectPath.evaluate(IncidentEvent.Fields.IMAGES.getPreferredName());
    if (imgMap != null) {
        for (Map<String, String> iecValue : imgMap) {
            actualImages.add(IncidentEventContext.image(iecValue.get("src"), iecValue.get("href"), iecValue.get("alt")));
        }
    }
    // Hamcrest convention: actual value first, expectation inside the matcher, so
    // a failure reads "Expected: <expected> but: was <actual>" instead of the reverse.
    assertThat(actualServiceKey, equalTo(serviceKey));
    assertThat(actualEventType, equalTo(eventType));
    assertThat(actualIncidentKey, equalTo(incidentKey));
    assertThat(actualDescription, equalTo(description));
    assertThat(actualWatchId, equalTo(watchId));
    assertThat(actualSeverity, equalTo("critical"));
    assertThat(actualClient, equalTo(client));
    assertThat(actualClientUrl, equalTo(clientUrl));
    assertThat(actualLinks, equalTo(links));
    assertThat(actualImages, equalTo(images));
    assertThat(actualPayload, equalTo(payload));
}
301824.051139gwt
/**
 * Smoke test for dead-code elimination: declares a mix of used, unused and
 * uninstantiated members (including JSNI bodies and JsInterop types), runs the
 * optimizer, then verifies exactly which declarations survive and how
 * references to uninstantiated types are rewritten ("null" types/fields/methods)
 * in the resulting AST.
 */
public void testSmoke() throws Exception {
    // Plain members: some referenced by the optimized snippet below, some deliberately not.
    addSnippetClassDecl("static int foo(int i) { return i; }");
    addSnippetClassDecl("static void unusedMethod() { }");
    addSnippetClassDecl("static void usedMethod() { }");
    addSnippetClassDecl("static class UnusedClass { }");
    // Referenced but never instantiated: its members should degrade to "null" references.
    addSnippetClassDecl("static class UninstantiatedClass { " + "int field; native int method() /*-{ return 1; }-*/; }");
    addSnippetClassDecl("static UninstantiatedClass uninstantiatedField;");
    addSnippetClassDecl("static int unusedField;");
    addSnippetClassDecl("static int unreadField;");
    addSnippetClassDecl("static int unassignedField;");
    addSnippetClassDecl("static UninstantiatedClass returnUninstantiatedClass() { return null; }");
    // Interface with one used and one unused constant.
    addSnippetClassDecl("interface UsedInterface {", "  int unusedConstant = 2;", "  int usedConstant = 3;", "  void method2();", "}");
    // Several constructors taking uninstantiated params, to test argument pruning
    // with and without side-effecting int arguments in between.
    addSnippetClassDecl("static class UsedClass implements UsedInterface {", "  int field2;", "  public void method2() { field2 = usedConstant; }", "  UsedClass(UninstantiatedClass c) { }", "  UsedClass(UninstantiatedClass c1, UninstantiatedClass c2) { }", "  UsedClass(UninstantiatedClass c1, int i, UninstantiatedClass c2) { field2 = i; }", "  UsedClass(UninstantiatedClass c1, int i, UninstantiatedClass c2, int j) " + "{ field2 = i + j; }", "}");
    // JSNI references keep members alive even when the class is uninstantiated.
    addSnippetClassDecl("static native void usedNativeMethod(UninstantiatedClass c, UsedClass c2)", "/*-{", "  c.@test.EntryPoint.UninstantiatedClass::field = 2;", "  c.@test.EntryPoint.UninstantiatedClass::method();", "  c2.@test.EntryPoint.UsedClass::field2++;", "  c2.@test.EntryPoint.UsedClass::method2();", "}-*/;");
    addSnippetClassDecl("static native void unusedNativeMethod()", "/*-{", "}-*/;");
    addSnippetClassDecl("static void methodWithUninstantiatedParam(UninstantiatedClass c) { }");
    addSnippetClassDecl("interface UnusedInterface { void foo(); }");
    addSnippetClassDecl("interface Callback { void go(); }");
    // JsInterop: exported/native types are pruned differently from plain classes.
    addSnippetImport("jsinterop.annotations.JsType");
    addSnippetImport("jsinterop.annotations.JsConstructor");
    addSnippetClassDecl("@JsType interface Js { void doIt(Callback cb); }");
    addSnippetClassDecl("@JsType(isNative=true) static class JsProto { ", "public JsProto(int arg) {}", "}");
    addSnippetClassDecl("static class JsProtoImpl extends JsProto {", "public JsProtoImpl() { super(10); }", "}");
    addSnippetClassDecl("static class JsProtoImpl2 extends JsProto {", "@JsConstructor public JsProtoImpl2() { super(10); }", "}");
    addSnippetClassDecl("static class JsProtoImpl3 extends JsProto {", "public JsProtoImpl3() { super(10); }", "}");
    Result result;
    // Optimize the snippet and check the rewritten statement list: arguments of
    // uninstantiated type are dropped, side-effecting args become comma expressions.
    (result = optimize("void", "usedMethod();", "unreadField = 1;", "foo(unassignedField);", "returnUninstantiatedClass();", "usedNativeMethod(null, null);", "foo(uninstantiatedField.field);", "uninstantiatedField.method();", "methodWithUninstantiatedParam(null);", "new UsedClass(null);", "new UsedClass(returnUninstantiatedClass(), returnUninstantiatedClass());", "new UsedClass(returnUninstantiatedClass(), 3, returnUninstantiatedClass());", "new UsedClass(returnUninstantiatedClass(), 3, returnUninstantiatedClass(), 4);", "UninstantiatedClass localUninstantiated = null;", "JsProtoImpl jsp = new JsProtoImpl();")).intoString("EntryPoint.usedMethod();", "EntryPoint.foo(EntryPoint.unassignedField);", "EntryPoint.returnUninstantiatedClass();", "EntryPoint.usedNativeMethod(null, null);", "EntryPoint.foo(null.nullField);", "null.nullMethod();", "EntryPoint.methodWithUninstantiatedParam();", "new EntryPoint$UsedClass();", "EntryPoint.returnUninstantiatedClass();", "EntryPoint.returnUninstantiatedClass();", "new EntryPoint$UsedClass();", "int lastArg;", "new EntryPoint$UsedClass((lastArg = (EntryPoint.returnUninstantiatedClass(), 3), EntryPoint.returnUninstantiatedClass(), lastArg));", "new EntryPoint$UsedClass((EntryPoint.returnUninstantiatedClass(), 3), (EntryPoint.returnUninstantiatedClass(), 4));", "new EntryPoint$JsProtoImpl();");
    // Members that must survive pruning.
    assertNotNull(result.findMethod("usedMethod"));
    assertNotNull(result.findField("unassignedField"));
    assertNotNull(result.findMethod("usedNativeMethod"));
    assertNotNull(result.findMethod("returnUninstantiatedClass"));
    assertNotNull(result.findMethod("methodWithUninstantiatedParam"));
    assertNotNull(result.findClass("EntryPoint$UsedClass"));
    assertNotNull(result.findClass("EntryPoint$UsedInterface"));
    // Members that must be pruned away.
    assertNull(result.findMethod("unusedMethod"));
    assertNull(result.findField("unusedField"));
    assertNull(result.findField("unreadField"));
    assertNull(result.findClass("EntryPoint$UnusedClass"));
    assertNull(result.findMethod("unusedNativeMethod"));
    assertNull(result.findField("uninstantiatedField"));
    assertNull(result.findClass("EntryPoint$UnusedInterface"));
    assertNull(result.findClass("UninstantiatedClass"));
    // Rewritten signatures/bodies for survivors that referenced uninstantiated types.
    assertEquals("static null returnUninstantiatedClass(){\n" + "  return null;\n" + "}", result.findMethod("returnUninstantiatedClass").toSource());
    assertEquals("static void methodWithUninstantiatedParam(){\n" + "}", result.findMethod("methodWithUninstantiatedParam").toSource());
    assertEquals("[final null nullField, int field2]", ((JsniMethodBody) result.findMethod("usedNativeMethod").getBody()).getJsniFieldRefs().toString());
    assertEquals("[public final null nullMethod(), public void method2()]", ((JsniMethodBody) result.findMethod("usedNativeMethod").getBody()).getJsniMethodRefs().toString());
    assertEquals("interface EntryPoint$UsedInterface {\n" + "  final static int usedConstant\n\n" + "  private static final void $clinit(){\n" + "    final static int usedConstant = 3;\n" + "  }\n" + "\n" + "}", result.findClass("EntryPoint$UsedInterface").toSource());
    assertEquals("public EntryPoint$JsProtoImpl(){\n" + "  this.EntryPoint$JsProto.EntryPoint$JsProto(10);\n" + "  this.$init();\n" + "}", findMethod(result.findClass("EntryPoint$JsProtoImpl"), "EntryPoint$JsProtoImpl").toSource());
    // JsProtoImpl3 is never instantiated and is pruned; JsProtoImpl2's @JsConstructor keeps it alive.
    assertNull(result.findClass("EntryPoint$JsProtoImpl3"));
    assertNotNull(result.findClass("EntryPoint$JsProtoImpl2"));
}
303716.651107gwt
/**
 * Formats {@code value} with a NumberFormat built from {@code pattern} and
 * asserts the formatted text equals {@code expected}.
 */
private static void assertFormat(String pattern, double value, String expected) {
    assertEquals(expected, NumberFormat.getFormat(pattern).format(value));
}

/**
 * Exercises scientific-notation patterns across magnitudes, signs and zero.
 * Each triple is (pattern, input value, expected formatted string); integer
 * literals widen to double exactly as they did when passed to format() inline.
 */
public void testExponential() {
    // Small fractional value with varying mantissa/exponent digit counts.
    assertFormat("0.####E0", 0.01234, "1.234E-2");
    assertFormat("00.000E00", 0.01234, "12.340E-03");
    assertFormat("##0.######E000", 0.01234, "12.34E-003");
    assertFormat("0.###E0;[0.###E0]", 0.01234, "1.234E-2");
    // Large integer value.
    assertFormat("0.####E0", 123456789, "1.2346E8");
    assertFormat("00.000E00", 123456789, "12.346E07");
    assertFormat("##0.######E000", 123456789, "123.456789E006");
    assertFormat("0.###E0;[0.###E0]", 123456789, "1.235E8");
    // Very large positive exponent.
    assertFormat("0.####E0", 1.23e300, "1.23E300");
    assertFormat("00.000E00", 1.23e300, "12.300E299");
    assertFormat("##0.######E000", 1.23e300, "1.23E300");
    assertFormat("0.###E0;[0.###E0]", 1.23e300, "1.23E300");
    // Tiny negative value; the last pattern supplies an explicit negative subpattern.
    assertFormat("0.####E0", -3.141592653e-271, "-3.1416E-271");
    assertFormat("00.000E00", -3.141592653e-271, "-31.416E-272");
    assertFormat("##0.######E000", -3.141592653e-271, "-314.159265E-273");
    assertFormat("0.###E0;[0.###E0]", -3.141592653e-271, "[3.142E-271]");
    // Zero.
    assertFormat("0.####E0", 0, "0E0");
    assertFormat("00.000E00", 0, "00.000E00");
    assertFormat("##0.######E000", 0, "0E000");
    assertFormat("0.###E0;[0.###E0]", 0, "0E0");
    // Minus one.
    assertFormat("0.####E0", -1, "-1E0");
    assertFormat("00.000E00", -1, "-10.000E-01");
    assertFormat("##0.######E000", -1, "-1E000");
    assertFormat("0.###E0;[0.###E0]", -1, "[1E0]");
    // Plus one.
    assertFormat("0.####E0", 1, "1E0");
    assertFormat("00.000E00", 1, "10.000E-01");
    assertFormat("##0.######E000", 1, "1E000");
    assertFormat("0.###E0;[0.###E0]", 1, "1E0");
    // Rounding to the available mantissa digits.
    assertFormat("#E0", 12345.0, "1E4");
    assertFormat("0E0", 12345.0, "1E4");
    assertFormat("##0.###E0", 12345.0, "12.345E3");
    assertFormat("##0.###E0", 12345.00001, "12.345E3");
    assertFormat("##0.###E0", 12345, "12.345E3");
    assertFormat("##0.####E0", 789.12346e-9, "789.1235E-9");
    assertFormat("##0.####E0", 780.e-9, "780E-9");
    // Pattern with no integer digits.
    assertFormat(".###E0", 45678.0, ".457E5");
    assertFormat(".###E0", 0, ".0E0");
    // Varying counts of '#'/'0' integer digits.
    assertFormat("#E0", 45678000, "5E7");
    assertFormat("##E0", 45678000, "46E6");
    assertFormat("####E0", 45678000, "4568E4");
    assertFormat("0E0", 45678000, "5E7");
    assertFormat("00E0", 45678000, "46E6");
    assertFormat("000E0", 45678000, "457E5");
    // "###E0" snaps the exponent to multiples of three (engineering notation).
    assertFormat("###E0", 0.0000123, "12E-6");
    assertFormat("###E0", 0.000123, "123E-6");
    assertFormat("###E0", 0.00123, "1E-3");
    assertFormat("###E0", 0.0123, "12E-3");
    assertFormat("###E0", 0.123, "123E-3");
    assertFormat("###E0", 1.23, "1E0");
    assertFormat("###E0", 12.3, "12E0");
    assertFormat("###E0", 123.0, "123E0");
    assertFormat("###E0", 1230.0, "1E3");
}
303726.721107gwt
/**
 * Formats {@code value} with a NumberFormat built from {@code pattern} and
 * asserts the formatted text equals {@code expected}.
 */
private static void assertFormatted(String pattern, double value, String expected) {
    assertEquals(expected, NumberFormat.getFormat(pattern).format(value));
}

/**
 * Exercises scientific-notation patterns in a locale whose decimal separator
 * is a comma. Each triple is (pattern, input value, expected string); integer
 * literals widen to double exactly as they did when passed to format() inline.
 */
public void testExponential() {
    // Small fractional value with varying mantissa/exponent digit counts.
    assertFormatted("0.####E0", 0.01234, "1,234E-2");
    assertFormatted("00.000E00", 0.01234, "12,340E-03");
    assertFormatted("##0.######E000", 0.01234, "12,34E-003");
    assertFormatted("0.###E0;[0.###E0]", 0.01234, "1,234E-2");
    // Large integer value.
    assertFormatted("0.####E0", 123456789, "1,2346E8");
    assertFormatted("00.000E00", 123456789, "12,346E07");
    assertFormatted("##0.######E000", 123456789, "123,456789E006");
    assertFormatted("0.###E0;[0.###E0]", 123456789, "1,235E8");
    // Very large positive exponent.
    assertFormatted("0.####E0", 1.23e300, "1,23E300");
    assertFormatted("00.000E00", 1.23e300, "12,300E299");
    assertFormatted("##0.######E000", 1.23e300, "1,23E300");
    assertFormatted("0.###E0;[0.###E0]", 1.23e300, "1,23E300");
    // Tiny negative value; the last pattern supplies an explicit negative subpattern.
    assertFormatted("0.####E0", -3.141592653e-271, "-3,1416E-271");
    assertFormatted("00.000E00", -3.141592653e-271, "-31,416E-272");
    assertFormatted("##0.######E000", -3.141592653e-271, "-314,159265E-273");
    assertFormatted("0.###E0;[0.###E0]", -3.141592653e-271, "[3,142E-271]");
    // Zero.
    assertFormatted("0.####E0", 0, "0E0");
    assertFormatted("00.000E00", 0, "00,000E00");
    assertFormatted("##0.######E000", 0, "0E000");
    assertFormatted("0.###E0;[0.###E0]", 0, "0E0");
    // Minus one.
    assertFormatted("0.####E0", -1, "-1E0");
    assertFormatted("00.000E00", -1, "-10,000E-01");
    assertFormatted("##0.######E000", -1, "-1E000");
    assertFormatted("0.###E0;[0.###E0]", -1, "[1E0]");
    // Plus one.
    assertFormatted("0.####E0", 1, "1E0");
    assertFormatted("00.000E00", 1, "10,000E-01");
    assertFormatted("##0.######E000", 1, "1E000");
    assertFormatted("0.###E0;[0.###E0]", 1, "1E0");
    // Rounding to the available mantissa digits.
    assertFormatted("#E0", 12345.0, "1E4");
    assertFormatted("0E0", 12345.0, "1E4");
    assertFormatted("##0.###E0", 12345.0, "12,345E3");
    assertFormatted("##0.###E0", 12345.00001, "12,345E3");
    assertFormatted("##0.###E0", 12345, "12,345E3");
    assertFormatted("##0.####E0", 789.12346e-9, "789,1235E-9");
    assertFormatted("##0.####E0", 780.e-9, "780E-9");
    // Pattern with no integer digits.
    assertFormatted(".###E0", 45678.0, ",457E5");
    assertFormatted(".###E0", 0, ",0E0");
    // Varying counts of '#'/'0' integer digits.
    assertFormatted("#E0", 45678000, "5E7");
    assertFormatted("##E0", 45678000, "46E6");
    assertFormatted("####E0", 45678000, "4568E4");
    assertFormatted("0E0", 45678000, "5E7");
    assertFormatted("00E0", 45678000, "46E6");
    assertFormatted("000E0", 45678000, "457E5");
    // "###E0" snaps the exponent to multiples of three (engineering notation).
    assertFormatted("###E0", 0.0000123, "12E-6");
    assertFormatted("###E0", 0.000123, "123E-6");
    assertFormatted("###E0", 0.00123, "1E-3");
    assertFormatted("###E0", 0.0123, "12E-3");
    assertFormatted("###E0", 0.123, "123E-3");
    assertFormatted("###E0", 1.23, "1E0");
    assertFormatted("###E0", 12.3, "12E0");
    assertFormatted("###E0", 123.0, "123E0");
    assertFormatted("###E0", 1230.0, "1E3");
}
302266.351129hadoop
/**
 * Configures the cross-check between core-default.xml and the common
 * configuration key classes: names the XML file, lists the classes whose
 * constants it is compared against, and registers every property (or prefix)
 * that is intentionally present on only one side.
 * NOTE(review): the member fields set here appear to be consumed by a
 * TestConfigurationFieldsBase-style superclass — confirm.
 */
public void initializeMemberVariables() {
    // The XML file under test and the configuration classes it is checked against.
    xmlFilename = "core-default.xml";
    configurationClasses = new Class[] { CommonConfigurationKeys.class, CommonConfigurationKeysPublic.class, LocalConfigKeys.class, FtpConfigKeys.class, SshFenceByTcpPort.class, LdapGroupsMapping.class, ZKFailoverController.class, SSLFactory.class, CompositeGroupsMapping.class, CodecUtil.class, RuleBasedLdapGroupsMapping.class };
    // Exemption sets: exact XML property names, XML property-name prefixes,
    // and configuration-class constants to skip during the comparison.
    xmlPropsToSkipCompare = new HashSet<>();
    xmlPrefixToSkipCompare = new HashSet<>();
    configurationPropsToSkipCompare = new HashSet<>();
    // Fail when a declared config key has no XML entry; tolerate XML-only entries.
    errorIfMissingConfigProps = true;
    errorIfMissingXmlProps = false;
    // FTP and misc filesystem properties present only in XML.
    xmlPropsToSkipCompare.add("fs.ftp.password.localhost");
    xmlPropsToSkipCompare.add("fs.ftp.user.localhost");
    xmlPropsToSkipCompare.add("fs.ftp.data.connection.mode");
    xmlPropsToSkipCompare.add("fs.ftp.transfer.mode");
    xmlPropsToSkipCompare.add("fs.ftp.timeout");
    xmlPropsToSkipCompare.add("hadoop.tmp.dir");
    xmlPropsToSkipCompare.add("nfs3.mountd.port");
    xmlPropsToSkipCompare.add("nfs3.server.port");
    xmlPropsToSkipCompare.add("fs.viewfs.rename.strategy");
    // Cloud-connector prefixes (S3A, Ozone, Azure, ADL) whose keys live in other modules.
    xmlPrefixToSkipCompare.add("fs.s3a.");
    xmlPrefixToSkipCompare.add("fs.o3fs.");
    xmlPrefixToSkipCompare.add("fs.ftp.impl");
    xmlPrefixToSkipCompare.add("fs.wasb.impl");
    xmlPrefixToSkipCompare.add("fs.wasbs.impl");
    xmlPrefixToSkipCompare.add("fs.azure.");
    xmlPrefixToSkipCompare.add("fs.abfs.impl");
    xmlPrefixToSkipCompare.add("fs.abfss.impl");
    xmlPrefixToSkipCompare.add("adl.");
    xmlPrefixToSkipCompare.add("fs.adl.");
    xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl");
    // ViewFS overload-scheme target implementations, one per supported scheme.
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfs.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.abfss.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.file.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ftp.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.gs.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.hdfs.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.http.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.https.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.ofs.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.o3fs.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.oss.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.s3a.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.swebhdfs.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.webhdfs.impl");
    xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.wasb.impl");
    // Azure-specific properties defined in the hadoop-azure module.
    xmlPropsToSkipCompare.add("fs.azure.sas.expiry.period");
    xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
    xmlPropsToSkipCompare.add("fs.azure.secure.mode");
    xmlPropsToSkipCompare.add("fs.azure.authorization");
    xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
    xmlPropsToSkipCompare.add("fs.azure.saskey.usecontainersaskeyforallaccess");
    xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix");
    // IPC scheduler/call-queue properties; "[port_number]" is a documentation
    // placeholder in the XML, so both forms are skipped.
    xmlPropsToSkipCompare.add("ipc.[port_number].callqueue.overflow.trigger.failover");
    xmlPropsToSkipCompare.add("ipc.callqueue.overflow.trigger.failover");
    xmlPropsToSkipCompare.add("ipc.[port_number].backoff.enable");
    xmlPropsToSkipCompare.add("ipc.backoff.enable");
    xmlPropsToSkipCompare.add("ipc.[port_number].callqueue.impl");
    xmlPropsToSkipCompare.add("ipc.callqueue.impl");
    xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.impl");
    xmlPropsToSkipCompare.add("ipc.scheduler.impl");
    xmlPropsToSkipCompare.add("ipc.[port_number].scheduler.priority.levels");
    xmlPropsToSkipCompare.add("ipc.[port_number].callqueue.capacity.weights");
    xmlPropsToSkipCompare.add("ipc.[port_number].faircallqueue.multiplexer.weights");
    xmlPropsToSkipCompare.add("ipc.[port_number].identity-provider.impl");
    xmlPropsToSkipCompare.add("ipc.identity-provider.impl");
    xmlPropsToSkipCompare.add("ipc.[port_number].cost-provider.impl");
    xmlPropsToSkipCompare.add("ipc.cost-provider.impl");
    xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.period-ms");
    xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.decay-factor");
    xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.thresholds");
    xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.backoff.responsetime.enable");
    xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.backoff.responsetime.thresholds");
    xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.metrics.top.user.count");
    xmlPropsToSkipCompare.add("ipc.[port_number].decay-scheduler.service-users");
    xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockshared");
    xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockexclusive");
    xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.handler");
    xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.lockfree");
    xmlPropsToSkipCompare.add("ipc.[port_number].weighted-cost.response");
    // Constants declared in code without corresponding core-default.xml entries.
    configurationPropsToSkipCompare.add(CommonConfigurationKeysPublic.IO_SORT_MB_KEY);
    configurationPropsToSkipCompare.add(CommonConfigurationKeysPublic.IO_SORT_FACTOR_KEY);
    configurationPropsToSkipCompare.add("dr.who");
    // Remaining one-off properties and prefixes that only one side declares.
    xmlPropsToSkipCompare.add("io.bytes.per.checksum");
    xmlPropsToSkipCompare.add("hadoop.http.filter.initializers");
    xmlPrefixToSkipCompare.add(HttpCrossOriginFilterInitializer.PREFIX);
    xmlPrefixToSkipCompare.add("fs.AbstractFileSystem.");
    xmlPrefixToSkipCompare.add("dfs.ha.fencing.ssh.");
    xmlPrefixToSkipCompare.add("hadoop.registry.");
    xmlPrefixToSkipCompare.add("hadoop.http.authentication.");
    xmlPropsToSkipCompare.add(KMSClientProvider.AUTH_RETRY);
    xmlPropsToSkipCompare.add("hadoop.workaround.non.threadsafe.getpwuid");
    xmlPropsToSkipCompare.add("dfs.ha.fencing.methods");
    xmlPrefixToSkipCompare.add(CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX);
    xmlPropsToSkipCompare.add("hadoop.common.configuration.version");
    xmlPropsToSkipCompare.add("fs.har.impl.disable.cache");
    xmlPropsToSkipCompare.add("hadoop.htrace.span.receiver.classes");
    xmlPropsToSkipCompare.add("ha.zookeeper.parent-znode");
    xmlPropsToSkipCompare.add("ha.zookeeper.session-timeout.ms");
    xmlPrefixToSkipCompare.add(CommonConfigurationKeys.FS_CLIENT_HTRACE_PREFIX);
    xmlPropsToSkipCompare.add("hadoop.kerberos.kinit.command");
    xmlPropsToSkipCompare.add("hadoop.rpc.socket.factory.class.ClientProtocol");
    xmlPropsToSkipCompare.add("io.compression.codec.bzip2.library");
    xmlPropsToSkipCompare.add("io.seqfile.local.dir");
    xmlPropsToSkipCompare.add("hadoop.http.sni.host.check.enabled");
}
302148.0519101hadoop
/**
 * Command-line entry point for the offline image viewer: parses options and
 * dispatches to the requested fsimage processor.
 *
 * <p>Supported processors (case-insensitive): FileDistribution, XML,
 * ReverseXML, Web (default), Delimited, DetectCorruption.
 *
 * @param args raw command-line arguments
 * @return 0 on success (including help requests), -1 on option errors or
 *         processing failure
 * @throws Exception if an unexpected error escapes a processor
 */
public static int run(String[] args) throws Exception {
    Options options = buildOptions();
    if (args.length == 0) {
        printUsage();
        return 0;
    }
    // A lone help argument ("-h"/"--help") is a successful invocation.
    if (args.length == 1 && isHelpOption(args[0])) {
        printUsage();
        return 0;
    }
    CommandLineParser parser = new PosixParser();
    CommandLine cmd;
    try {
        cmd = parser.parse(options, args);
    } catch (ParseException e) {
        // Include the parser's diagnostic; previously only a bare header
        // line was printed and the actual cause was lost.
        System.out.println("Error parsing command-line options: " + e.getMessage());
        printUsage();
        return -1;
    }
    if (cmd.hasOption("h")) {
        // Help requested among other options: treat as success, consistent
        // with the bare-argument help path above (previously returned -1).
        printUsage();
        return 0;
    }
    String inputFile = cmd.getOptionValue("i");
    String processor = cmd.getOptionValue("p", "Web");
    String outputFile = cmd.getOptionValue("o", "-");
    String delimiter = cmd.getOptionValue("delimiter", PBImageTextWriter.DEFAULT_DELIMITER);
    String tempPath = cmd.getOptionValue("t", "");
    int threads = Integer.parseInt(cmd.getOptionValue("m", "1"));
    Configuration conf = new Configuration();
    PrintStream out = null;
    try {
        // "-" means stdout. ReverseXML writes the reconstructed image itself,
        // so its textual output also goes to stdout rather than outputFile.
        out = outputFile.equals("-") || "REVERSEXML".equalsIgnoreCase(processor) ? System.out : new PrintStream(outputFile, "UTF-8");
        switch(StringUtils.toUpperCase(processor)) {
            case "FILEDISTRIBUTION":
                // Histogram of file sizes, bucketed by "step" up to "maxSize".
                long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
                int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
                boolean formatOutput = cmd.hasOption("format");
                try (RandomAccessFile r = new RandomAccessFile(inputFile, "r")) {
                    new FileDistributionCalculator(conf, maxSize, step, formatOutput, out).visit(r);
                }
                break;
            case "XML":
                try (RandomAccessFile r = new RandomAccessFile(inputFile, "r")) {
                    new PBImageXmlWriter(conf, out).visit(r);
                }
                break;
            case "REVERSEXML":
                // Rebuilds a binary fsimage from an XML dump; failures here
                // terminate the process rather than returning -1.
                try {
                    OfflineImageReconstructor.run(inputFile, outputFile);
                } catch (Exception e) {
                    System.err.println("OfflineImageReconstructor failed: " + e.getMessage());
                    e.printStackTrace(System.err);
                    ExitUtil.terminate(1);
                }
                break;
            case "WEB":
                // Serves the image over a read-only WebHDFS-style endpoint.
                String addr = cmd.getOptionValue("addr", "localhost:5978");
                try (WebImageViewer viewer = new WebImageViewer(NetUtils.createSocketAddr(addr), conf)) {
                    viewer.start(inputFile);
                }
                break;
            case "DELIMITED":
                boolean printStoragePolicy = cmd.hasOption("sp");
                boolean printECPolicy = cmd.hasOption("ec");
                try (PBImageDelimitedTextWriter writer = new PBImageDelimitedTextWriter(out, delimiter, tempPath, printStoragePolicy, printECPolicy, threads, outputFile, conf)) {
                    writer.visit(inputFile);
                }
                break;
            case "DETECTCORRUPTION":
                try (PBImageCorruptionDetector detector = new PBImageCorruptionDetector(out, delimiter, tempPath)) {
                    detector.visit(inputFile);
                }
                break;
            default:
                System.err.println("Invalid processor specified : " + processor);
                printUsage();
                return -1;
        }
        return 0;
    } catch (EOFException e) {
        System.err.println("Input file ended unexpectedly. Exiting");
    } catch (IOException e) {
        System.err.println("Encountered exception.  Exiting: " + e.getMessage());
        e.printStackTrace(System.err);
    } finally {
        // Only close streams this method opened; never close System.out.
        if (out != null && out != System.out) {
            out.close();
        }
    }
    return -1;
}
302246.181898hadoop
/**
 * Dumps the snapshot-diff section of the fsimage as XML.
 *
 * <p>Reads delimited {@code DiffEntry} messages until the stream is
 * exhausted; each entry is followed inline by its FILEDIFF or DIRECTORYDIFF
 * payload records, which this method also consumes from {@code in}.
 *
 * @param in the section's input stream, positioned at the first entry
 * @throws IOException if an entry has an unknown type or reading fails
 */
private void dumpSnapshotDiffSection(InputStream in) throws IOException {
    out.print("<" + SNAPSHOT_DIFF_SECTION_NAME + ">");
    while (true) {
        SnapshotDiffSection.DiffEntry e = SnapshotDiffSection.DiffEntry.parseDelimitedFrom(in);
        if (e == null) {
            // End of section: no more delimited entries.
            break;
        }
        // Open the entry element matching the diff type.
        switch(e.getType()) {
            case FILEDIFF:
                out.print("<" + SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY + ">");
                break;
            case DIRECTORYDIFF:
                out.print("<" + SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY + ">");
                break;
            default:
                throw new IOException("unknown DiffEntry type " + e.getType());
        }
        o(SNAPSHOT_DIFF_SECTION_INODE_ID, e.getInodeId());
        o(SNAPSHOT_DIFF_SECTION_COUNT, e.getNumOfDiff());
        switch(e.getType()) {
            case FILEDIFF:
                {
                    for (int i = 0; i < e.getNumOfDiff(); ++i) {
                        out.print("<" + SNAPSHOT_DIFF_SECTION_FILE_DIFF + ">");
                        SnapshotDiffSection.FileDiff f = SnapshotDiffSection.FileDiff.parseDelimitedFrom(in);
                        o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, f.getSnapshotId()).o(SNAPSHOT_DIFF_SECTION_SIZE, f.getFileSize()).o(SECTION_NAME, f.getName().toStringUtf8());
                        // Protobuf getters never return null (they return a
                        // default instance), so the presence flag must be
                        // checked — a null check here is always true and
                        // would emit an empty <snapshotCopy> for every diff.
                        // This mirrors d.hasSnapshotCopy() in the
                        // DIRECTORYDIFF branch below.
                        if (f.hasSnapshotCopy()) {
                            INodeSection.INodeFile snapshotCopy = f.getSnapshotCopy();
                            out.print("<" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">");
                            dumpINodeFile(snapshotCopy);
                            out.print("</" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">\n");
                        }
                        if (f.getBlocksCount() > 0) {
                            out.print("<" + INODE_SECTION_BLOCKS + ">");
                            for (BlockProto b : f.getBlocksList()) {
                                out.print("<" + INODE_SECTION_BLOCK + ">");
                                o(SECTION_ID, b.getBlockId()).o(INODE_SECTION_GENSTAMP, b.getGenStamp()).o(INODE_SECTION_NUM_BYTES, b.getNumBytes());
                                out.print("</" + INODE_SECTION_BLOCK + ">\n");
                            }
                            out.print("</" + INODE_SECTION_BLOCKS + ">\n");
                        }
                        out.print("</" + SNAPSHOT_DIFF_SECTION_FILE_DIFF + ">\n");
                    }
                }
                break;
            case DIRECTORYDIFF:
                {
                    for (int i = 0; i < e.getNumOfDiff(); ++i) {
                        out.print("<" + SNAPSHOT_DIFF_SECTION_DIR_DIFF + ">");
                        SnapshotDiffSection.DirectoryDiff d = SnapshotDiffSection.DirectoryDiff.parseDelimitedFrom(in);
                        o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, d.getSnapshotId()).o(SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE, d.getChildrenSize()).o(SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT, d.getIsSnapshotRoot()).o(SECTION_NAME, d.getName().toStringUtf8());
                        if (d.hasSnapshotCopy()) {
                            out.print("<" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">");
                            dumpINodeDirectory(d.getSnapshotCopy());
                            out.print("</" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">\n");
                        }
                        o(SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE, d.getCreatedListSize());
                        for (long did : d.getDeletedINodeList()) {
                            o(SNAPSHOT_DIFF_SECTION_DELETED_INODE, did);
                        }
                        for (int dRefid : d.getDeletedINodeRefList()) {
                            o(SNAPSHOT_DIFF_SECTION_DELETED_INODE_REF, dRefid);
                        }
                        // Created-list entries follow the DirectoryDiff as
                        // separate delimited messages.
                        for (int j = 0; j < d.getCreatedListSize(); ++j) {
                            SnapshotDiffSection.CreatedListEntry ce = SnapshotDiffSection.CreatedListEntry.parseDelimitedFrom(in);
                            out.print("<" + SNAPSHOT_DIFF_SECTION_CREATED + ">");
                            o(SECTION_NAME, ce.getName().toStringUtf8());
                            out.print("</" + SNAPSHOT_DIFF_SECTION_CREATED + ">\n");
                        }
                        out.print("</" + SNAPSHOT_DIFF_SECTION_DIR_DIFF + ">\n");
                    }
                    break;
                }
            default:
                break;
        }
        // Close the entry element matching the diff type.
        switch(e.getType()) {
            case FILEDIFF:
                out.print("</" + SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY + ">");
                break;
            case DIRECTORYDIFF:
                out.print("</" + SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY + ">");
                break;
            default:
                throw new IOException("unknown DiffEntry type " + e.getType());
        }
    }
    out.print("</" + SNAPSHOT_DIFF_SECTION_NAME + ">\n");
}
302759.5311105hadoop
/**
 * Verifies that the Mover migrates an erasure-coded (striped) file from
 * DISK to ARCHIVE when the policy changes to COLD, and that a subsequent
 * change to ONE_SSD leaves the striped blocks on ARCHIVE (the asserted
 * outcome of the second Mover run).
 */
public void testMoverWithStripedFile() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConfWithStripe(conf);
    // Start 10 datanodes, each with two storages, all large enough to hold
    // the test data.
    int numOfDatanodes = 10;
    int storagesPerDatanode = 2;
    long capacity = 10 * defaultBlockSize;
    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
    for (int i = 0; i < numOfDatanodes; i++) {
        for (int j = 0; j < storagesPerDatanode; j++) {
            capacities[i][j] = capacity;
        }
    }
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numOfDatanodes).storagesPerDatanode(storagesPerDatanode).storageTypes(new StorageType[][] { { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE } }).storageCapacities(capacities).build();
    try {
        cluster.waitActive();
        cluster.getFileSystem().enableErasureCodingPolicy(StripedFileTestUtil.getDefaultECPolicy().getName());
        ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        String barDir = "/bar";
        // 0777 (octal) = rwxrwxrwx. The previous literal "(short) 777" was
        // decimal 777 (octal 1411), which is not the intended mode.
        client.mkdirs(barDir, new FsPermission((short) 0777), true);
        client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
        // Make /bar an EC directory so files under it are striped.
        client.setErasureCodingPolicy(barDir, StripedFileTestUtil.getDefaultECPolicy().getName());
        final String fooFile = "/bar/foo";
        long fileLen = 20 * defaultBlockSize;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile), fileLen, (short) 3, 0);
        // Under the HOT policy every replica must start on DISK.
        LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.DISK, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // Add 5 ARCHIVE-only datanodes so the COLD policy can be satisfied.
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5, new StorageType[][] { { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE } }, true, null, null, null, capacities, null, false, false, false, null, null, null);
        cluster.triggerHeartbeats();
        client.setStoragePolicy(barDir, "COLD");
        // Mover run #1: move everything to ARCHIVE; must succeed (exit 0).
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        waitForUpdatedStorageType(client, fooFile, fileLen, StorageType.ARCHIVE);
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // Add 5 SSD+DISK datanodes, then switch the policy to ONE_SSD.
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5, new StorageType[][] { { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK } }, true, null, null, null, capacities, null, false, false, false, null, null, null);
        cluster.triggerHeartbeats();
        client.setStoragePolicy(barDir, "ONE_SSD");
        // Mover run #2. The blocks are asserted to remain on ARCHIVE —
        // presumably because ONE_SSD is not applied to striped files
        // (NOTE(review): confirm against Mover's striped-file handling).
        rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
    } finally {
        cluster.shutdown();
    }
}
303848.373109hadoop
/**
 * Tests deleting files/directories from the current (non-snapshot) tree:
 * inodes never captured by a snapshot must be fully released (their blocks
 * cleared from the block map), while inodes captured by a snapshot must
 * remain reachable through the ".snapshot" paths with their
 * snapshot-time metadata (e.g. replication) preserved.
 */
public void testDeleteCurrentFileDirectory() throws Exception {
    // deleteDir/deleteFile: created then removed BEFORE snapshot s0 exists.
    Path deleteDir = new Path(subsub, "deleteDir");
    Path deleteFile = new Path(deleteDir, "deleteFile");
    // noChangeDirParent subtree: untouched between snapshots, deleted later.
    Path noChangeDirParent = new Path(sub, "noChangeDirParent");
    Path noChangeDir = new Path(noChangeDirParent, "noChangeDir");
    Path noChangeFile = new Path(noChangeDir, "noChangeFile");
    DFSTestUtil.createFile(hdfs, deleteFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    // Files whose replication will be changed after snapshot s0.
    Path metaChangeFile1 = new Path(subsub, "metaChangeFile1");
    DFSTestUtil.createFile(hdfs, metaChangeFile1, BLOCKSIZE, REPLICATION, seed);
    Path metaChangeFile2 = new Path(noChangeDir, "metaChangeFile2");
    DFSTestUtil.createFile(hdfs, metaChangeFile2, BLOCKSIZE, REPLICATION, seed);
    // Enable quota accounting on dir so checkQuotaUsageComputation applies.
    hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    checkQuotaUsageComputation(dir, 10, BLOCKSIZE * REPLICATION * 4);
    // Delete before any snapshot: namespace and diskspace usage drop.
    hdfs.delete(deleteDir, true);
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    // tempdir/tempfile exist only between s0 and their deletion, so they are
    // in no snapshot; after deletion their blocks must be gone.
    Path tempDir = new Path(dir, "tempdir");
    Path tempFile = new Path(tempDir, "tempfile");
    DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = temp.getBlocks();
    hdfs.delete(tempDir, true);
    // Usage returns to the pre-tempdir values.
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    // Blocks of the un-snapshotted file must no longer belong to any inode.
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // Changes after s0: a new file plus replication changes, then snapshot s1.
    Path newFileAfterS0 = new Path(subsub, "newFile");
    DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
    hdfs.setReplication(metaChangeFile1, REPLICATION_1);
    hdfs.setReplication(metaChangeFile2, REPLICATION_1);
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
    Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
    // Delete a subtree that IS covered by snapshots; quota usage must not
    // change because the snapshot copies still account for it.
    hdfs.delete(noChangeDirParent, true);
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // The deleted subtree must still be reachable through s1's snapshot path.
    Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName() + "/" + noChangeDirParent.getName() + "/" + noChangeDir.getName());
    INodeDirectory snapshotNode = (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    INodeFile metaChangeFile2SCopy = children.get(0).asFile();
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertTrue(metaChangeFile2SCopy.isWithSnapshot());
    assertFalse(metaChangeFile2SCopy.isUnderConstruction());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    // Replication as seen from each snapshot: REPLICATION_1 from s1 onward,
    // the original REPLICATION as of s0.
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
    // Create a post-s1 file under sub, then delete the whole sub subtree.
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 10L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // newFile was in no snapshot, so its blocks must be released.
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // sub is still visible through s1; only the snapshotted child remains in
    // its current-state children list.
    Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName());
    INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString()).asDirectory();
    assertTrue(snapshotNode4Sub.isWithSnapshot());
    assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
    assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
    INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
    assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
    // Parent pointers inside the snapshot copy must stay consistent.
    assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
    INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
    children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(2, children.size());
    assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
    assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
    // As of s0, subsub had only metaChangeFile1 (newFileAfterS0 came later).
    children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
    assertEquals(1, children.size());
    INode child = children.get(0);
    assertEquals(child.getLocalName(), metaChangeFile1.getName());
    INodeFile metaChangeFile1SCopy = child.asFile();
    assertTrue(metaChangeFile1SCopy.isWithSnapshot());
    assertFalse(metaChangeFile1SCopy.isUnderConstruction());
    // Same per-snapshot replication checks as for metaChangeFile2.
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
303518.585105hadoop
/**
 * Exercises combining snapshot diffs when the middle snapshot (s2) is
 * deleted: s2's diff must be merged into s1, files visible only in s2
 * (file14, file15) must disappear from the snapshot namespace, and files
 * recorded in s1 must keep their s1-time status and blocks.
 *
 * @param snapshotRoot the snapshottable directory
 * @param modDirStr relative path (possibly empty) under snapshotRoot where
 *        the test files are created
 * @param dirNodeNum number of directory inodes under snapshotRoot, used to
 *        offset the expected namespace counts
 */
private void testCombineSnapshotDiffImpl(Path snapshotRoot, String modDirStr, int dirNodeNum) throws Exception {
    Path modDir = modDirStr.isEmpty() ? snapshotRoot : new Path(snapshotRoot, modDirStr);
    Path file10 = new Path(modDir, "file10");
    Path file11 = new Path(modDir, "file11");
    Path file12 = new Path(modDir, "file12");
    Path file13 = new Path(modDir, "file13");
    Path file14 = new Path(modDir, "file14");
    Path file15 = new Path(modDir, "file15");
    // Initial state for s1: file10..file13 at REPLICATION_1.
    DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, seed);
    DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, seed);
    DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, seed);
    DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed);
    SnapshotTestHelper.createSnapshot(hdfs, snapshotRoot, "s1");
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 8 * BLOCKSIZE);
    // Modifications between s1 and s2: delete file11 (still held by s1, so
    // diskspace is unchanged), raise replication of file12/file13, and
    // create file14/file15.
    hdfs.delete(file11, true);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 8 * BLOCKSIZE);
    hdfs.setReplication(file12, REPLICATION);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 9 * BLOCKSIZE);
    hdfs.setReplication(file13, REPLICATION);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 10 * BLOCKSIZE);
    DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 5, 13 * BLOCKSIZE);
    DFSTestUtil.createFile(hdfs, file15, BLOCKSIZE, REPLICATION, seed);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 16 * BLOCKSIZE);
    hdfs.createSnapshot(snapshotRoot, "s2");
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 16 * BLOCKSIZE);
    // Modifications between s2 and s3: recreate file11, delete file12 and
    // file14 (both held by snapshots, so diskspace stays put), lower the
    // replication of file13 and file15.
    DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION, seed);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
    hdfs.delete(file12, true);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
    hdfs.setReplication(file13, (short) (REPLICATION - 2));
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
    hdfs.delete(file14, true);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
    hdfs.setReplication(file15, REPLICATION_1);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
    hdfs.createSnapshot(snapshotRoot, "s3");
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
    hdfs.setReplication(file10, REPLICATION);
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 20 * BLOCKSIZE);
    // Snapshot paths used to compare state before/after deleting s2.
    Path file10_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file10");
    Path file11_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file11");
    Path file12_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file12");
    Path file13_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file13");
    Path file14_s2 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s2", modDirStr + "file14");
    Path file15_s2 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s2", modDirStr + "file15");
    FileStatus statusBeforeDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusBeforeDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusBeforeDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
    // file14's blocks are tracked so we can verify their release after the
    // only snapshot holding file14 (s2) is gone.
    INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(file14_s2.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks_14 = file14Node.getBlocks();
    TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir, blockmanager);
    // Delete the middle snapshot: its diff is combined into s1.
    hdfs.deleteSnapshot(snapshotRoot, "s2");
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 14 * BLOCKSIZE);
    // Files recorded in s1 must be unaffected by the s2 deletion.
    FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusAfterDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusAfterDeletion13 = hdfs.getFileStatus(file13_s1);
    assertEquals(statusBeforeDeletion10.toString(), statusAfterDeletion10.toString());
    assertEquals(statusBeforeDeletion11.toString(), statusAfterDeletion11.toString());
    assertEquals(statusBeforeDeletion12.toString(), statusAfterDeletion12.toString());
    assertEquals(statusBeforeDeletion13.toString(), statusAfterDeletion13.toString());
    TestSnapshotBlocksMap.assertBlockCollection(file10_s1.toString(), 1, fsdir, blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file11_s1.toString(), 1, fsdir, blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file12_s1.toString(), 1, fsdir, blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file13_s1.toString(), 1, fsdir, blockmanager);
    // file14/file15 were created after s1, so they must not appear under s1.
    Path file14_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file14");
    Path file15_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file15");
    assertFalse(hdfs.exists(file14_s1));
    assertFalse(hdfs.exists(file15_s1));
    // file14 is now in no snapshot and was deleted from the current tree, so
    // its blocks must have been released.
    for (BlockInfo b : blocks_14) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // file13's blocks should carry its lowest live replication (REPLICATION_1).
    // NOTE(review): REPLICATION - 2 is assumed to equal REPLICATION_1 here —
    // confirm against the class constants.
    INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
    for (BlockInfo b : nodeFile13.getBlocks()) {
        assertEquals(REPLICATION_1, b.getReplication());
    }
    TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir, blockmanager);
    // file12 survives only in s1, where it had REPLICATION_1.
    INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
    for (BlockInfo b : nodeFile12.getBlocks()) {
        assertEquals(REPLICATION_1, b.getReplication());
    }
}
302039.9612114hadoop
/**
 * Verifies every way truncate is expected to fail — on an open/appended
 * file, negative or too-large new lengths, directories, missing files,
 * missing permission, and while a previous truncate's block recovery is in
 * progress — then confirms the pending truncate completes once lease
 * recovery runs and the datanodes return.
 */
public void testTruncateFailure() throws IOException {
    // File spans two full blocks plus a half block; we truncate 1 byte.
    int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
    int toTruncate = 1;
    byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
    final Path dir = new Path("/dir");
    final Path p = new Path(dir, "testTruncateFailure");
    {
        // Truncate must fail while the file is open for write.
        FSDataOutputStream out = fs.create(p, false, BLOCK_SIZE, REPLICATION, BLOCK_SIZE);
        out.write(contents, 0, startingFileSize);
        try {
            fs.truncate(p, 0);
            fail("Truncate must fail on open file.");
        } catch (IOException expected) {
            GenericTestUtils.assertExceptionContains("Failed to TRUNCATE_FILE", expected);
        } finally {
            out.close();
        }
    }
    {
        // Truncate must also fail while the file is open for append.
        FSDataOutputStream out = fs.append(p);
        try {
            fs.truncate(p, 0);
            fail("Truncate must fail for append.");
        } catch (IOException expected) {
            GenericTestUtils.assertExceptionContains("Failed to TRUNCATE_FILE", expected);
        } finally {
            out.close();
        }
    }
    // Invalid target lengths.
    try {
        fs.truncate(p, -1);
        fail("Truncate must fail for a negative new length.");
    } catch (HadoopIllegalArgumentException expected) {
        GenericTestUtils.assertExceptionContains("Cannot truncate to a negative file size", expected);
    }
    try {
        fs.truncate(p, startingFileSize + 1);
        fail("Truncate must fail for a larger new length.");
    } catch (Exception expected) {
        GenericTestUtils.assertExceptionContains("Cannot truncate to a larger file size", expected);
    }
    // Invalid targets: a directory and a non-existing path.
    try {
        fs.truncate(dir, 0);
        fail("Truncate must fail for a directory.");
    } catch (Exception expected) {
        GenericTestUtils.assertExceptionContains("Path is not a file", expected);
    }
    try {
        fs.truncate(new Path(dir, "non-existing"), 0);
        fail("Truncate must fail for a non-existing file.");
    } catch (Exception expected) {
        GenericTestUtils.assertExceptionContains("File does not exist", expected);
    }
    // rw-rw-r--: user "foo" (not the owner) has no WRITE access.
    fs.setPermission(p, FsPermission.createImmutable((short) 0664));
    {
        final UserGroupInformation fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "foo" });
        try {
            final FileSystem foofs = DFSTestUtil.getFileSystemAs(fooUgi, conf);
            foofs.truncate(p, 0);
            fail("Truncate must fail for no WRITE permission.");
        } catch (Exception expected) {
            GenericTestUtils.assertExceptionContains("Permission denied", expected);
        }
    }
    // With datanodes down, a mid-block truncate cannot complete immediately:
    // it must report "not ready" and leave block recovery pending.
    cluster.shutdownDataNodes();
    NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);
    int newLength = startingFileSize - toTruncate;
    boolean isReady = fs.truncate(p, newLength);
    assertThat("truncate should have triggered block recovery.", isReady, is(false));
    {
        // A second truncate while recovery is pending must be rejected.
        try {
            fs.truncate(p, 0);
            fail("Truncate must fail since a truncate is already in progress.");
        } catch (IOException expected) {
            GenericTestUtils.assertExceptionContains("Failed to TRUNCATE_FILE", expected);
        }
    }
    // Poll until the NameNode takes over the lease (lease recovery), bounded
    // by RECOVERY_ATTEMPTS polls of SLEEP ms each.
    boolean recoveryTriggered = false;
    for (int i = 0; i < RECOVERY_ATTEMPTS; i++) {
        String leaseHolder = NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), p.toUri().getPath());
        if (leaseHolder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
            recoveryTriggered = true;
            break;
        }
        try {
            Thread.sleep(SLEEP);
        } catch (InterruptedException ignored) {
        }
    }
    assertThat("lease recovery should have occurred in ~" + SLEEP * RECOVERY_ATTEMPTS + " ms.", recoveryTriggered, is(true));
    // Bring datanodes back so the pending block recovery can finish, then
    // restore the default lease periods.
    cluster.startDataNodes(conf, DATANODE_NUM, true, StartupOption.REGULAR, null);
    cluster.waitActive();
    checkBlockRecovery(p);
    NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY, DFSConfigKeys.DFS_LEASE_HARDLIMIT_DEFAULT) * 1000);
    // The file must now hold the original contents minus the truncated byte.
    checkFullFile(p, newLength, contents);
    fs.delete(p, false);
}
302629.1712108hadoop
/**
 * Verifies that the NameNode's xceiver load metrics (total load, number of
 * in-service nodes, and in-service load) track datanode lifecycle events:
 * dead nodes, decommission/maintenance transitions, restarts, open write
 * pipelines, and full cluster shutdown.
 *
 * @param minMaintenanceR value used for
 *     DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY so callers can exercise
 *     different maintenance replication settings
 * @throws Exception on any assertion failure or cluster error
 */
public void testXceiverCountInternal(int minMaintenanceR) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Fail fast on block allocation instead of retrying for a long time.
    conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, minMaintenanceR);
    MiniDFSCluster cluster = null;
    final int nodes = 8;
    final int fileCount = 5;
    final short fileRepl = 3;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
        List<DataNode> datanodes = cluster.getDataNodes();
        final DistributedFileSystem fs = cluster.getFileSystem();
        triggerHeartbeats(datanodes);
        // Baseline: no load, all nodes in service.
        int expectedTotalLoad = 0;
        int expectedInServiceNodes = nodes;
        int expectedInServiceLoad = 0;
        checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
        // Phase 1: kill half the nodes while cycling each dead node through
        // decommission (even i) or maintenance (odd i); the in-service count
        // must drop once per killed node and stay stable across the
        // start/stop admin transitions.
        for (int i = 0; i < nodes / 2; i++) {
            DataNode dn = datanodes.get(i);
            DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
            dn.shutdown();
            DFSTestUtil.setDatanodeDead(dnd);
            BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
            startDecommissionOrMaintenance(dnm, dnd, (i % 2 == 0));
            expectedInServiceNodes--;
            assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
            assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
            stopDecommissionOrMaintenance(dnm, dnd, (i % 2 == 0));
            assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
        }
        // Phase 2: bring everything back and re-verify the baseline.
        cluster.restartDataNodes();
        cluster.waitActive();
        datanodes = cluster.getDataNodes();
        expectedInServiceNodes = nodes;
        assertEquals(nodes, datanodes.size());
        checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
        // Phase 3: open write pipelines; each replica contributes two
        // xceivers (writer + reader side), hence 2 * fileRepl per file.
        DFSOutputStream[] streams = new DFSOutputStream[fileCount];
        for (int i = 0; i < fileCount; i++) {
            streams[i] = (DFSOutputStream) fs.create(new Path("/f" + i), fileRepl).getWrappedStream();
            streams[i].write("1".getBytes());
            streams[i].hsync();
            expectedTotalLoad += 2 * fileRepl;
            expectedInServiceLoad += 2 * fileRepl;
        }
        triggerHeartbeats(datanodes);
        checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
        // Phase 4: decommission/maintenance the first fileRepl live nodes;
        // their xceiver counts must be subtracted from the in-service load
        // while total load is unchanged.
        for (int i = 0; i < fileRepl; i++) {
            expectedInServiceNodes--;
            DatanodeDescriptor dnd = dnm.getDatanode(datanodes.get(i).getDatanodeId());
            expectedInServiceLoad -= dnd.getXceiverCount();
            startDecommissionOrMaintenance(dnm, dnd, (i % 2 == 0));
            DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
            Thread.sleep(100);
            checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
        }
        // Phase 5: close the streams. Close may fail when the pipeline has
        // too many non-in-service nodes; only rethrow when fewer than
        // fileRepl pipeline members were under admin operations.
        for (int i = 0; i < fileCount; i++) {
            int adminOps = 0;
            for (DatanodeInfo dni : streams[i].getPipeline()) {
                DatanodeDescriptor dnd = dnm.getDatanode(dni);
                expectedTotalLoad -= 2;
                if (!dnd.isInService()) {
                    adminOps++;
                } else {
                    expectedInServiceLoad -= 2;
                }
            }
            try {
                streams[i].close();
            } catch (IOException ioe) {
                if (adminOps < fileRepl) {
                    throw ioe;
                }
            }
            triggerHeartbeats(datanodes);
            checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
        }
        // Phase 6: kill all nodes one by one; the first fileRepl nodes are
        // already out of service, so the in-service count only drops for
        // i >= fileRepl. The in-service xceiver average must stay 0.
        for (int i = 0; i < nodes; i++) {
            DataNode dn = datanodes.get(i);
            dn.shutdown();
            DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
            DFSTestUtil.setDatanodeDead(dnDesc);
            BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
            assertEquals(nodes - 1 - i, namesystem.getNumLiveDataNodes());
            if (i >= fileRepl) {
                expectedInServiceNodes--;
            }
            assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
            assertEquals(0, getInServiceXceiverAverage(namesystem), EPSILON);
        }
        checkClusterHealth(0, namesystem, 0.0, 0, 0.0);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
301803.849117hadoop
/**
 * Verifies that missing/corrupt EC block-group metrics exposed through the
 * NameNode JMX beans (ReplicatedBlocksState, ECBlockGroupsState and
 * NameNodeInfo) are consistent with the FSNamesystem counters after
 * corrupting parityBlocks + 1 replicas of a striped block group — one more
 * than parity, so the group is unrecoverable and counted as missing.
 *
 * @throws Exception on any assertion failure or cluster error
 */
public void testVerifyMissingBlockGroupsMetrics() throws Exception {
    MiniDFSCluster cluster = null;
    DistributedFileSystem fs = null;
    try {
        Configuration conf = new HdfsConfiguration();
        int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
        int parityBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
        int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
        int totalSize = dataBlocks + parityBlocks;
        cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(totalSize).build();
        fs = cluster.getFileSystem();
        fs.enableErasureCodingPolicy(StripedFileTestUtil.getDefaultECPolicy().getName());
        Path ecDirPath = new Path("/striped");
        fs.mkdir(ecDirPath, FsPermission.getDirDefault());
        fs.getClient().setErasureCodingPolicy(ecDirPath.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
        // Write exactly one full stripe so the file is a single block group.
        Path file = new Path(ecDirPath, "corrupted");
        final int length = cellSize * dataBlocks;
        final byte[] bytes = StripedFileTestUtil.generateBytes(length);
        DFSTestUtil.writeFile(fs, file, bytes);
        LocatedStripedBlock lsb = (LocatedStripedBlock) fs.getClient().getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
        final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb, cellSize, dataBlocks, parityBlocks);
        // Corrupt one replica more than the parity count so the block group
        // cannot be reconstructed.
        for (int i = 0; i < parityBlocks + 1; i++) {
            int ipcPort = blks[i].getLocations()[0].getIpcPort();
            cluster.corruptReplica(cluster.getDataNode(ipcPort), blks[i].getBlock());
        }
        // Freeze heartbeats so the corruption is not repaired while the
        // metrics are being read.
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
        }
        // Reading should fail; the client exception reports the number of
        // missing chunks, which must equal parityBlocks + 1.
        try {
            IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf, true);
        } catch (IOException ie) {
            assertTrue(ie.getMessage().contains("missingChunksNum=" + (parityBlocks + 1)));
        }
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName replStateMBeanName = new ObjectName("Hadoop:service=NameNode,name=ReplicatedBlocksState");
        ObjectName ecBlkGrpStateMBeanName = new ObjectName("Hadoop:service=NameNode,name=ECBlockGroupsState");
        ObjectName namenodeMXBeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        long expectedMissingBlockCount = 1L;
        long expectedCorruptBlockCount = 1L;
        // Poll JMX until the NameNode has registered the missing block group.
        GenericTestUtils.waitFor(() -> {
            try {
                Long numMissingBlocks = (Long) mbs.getAttribute(namenodeMXBeanName, "NumberOfMissingBlocks");
                if (numMissingBlocks == expectedMissingBlockCount) {
                    return true;
                }
            } catch (Exception e) {
                Assert.fail("Caught unexpected exception.");
            }
            return false;
        }, 1000, 60000);
        BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
        // Missing blocks: replicated + EC bean counts must add up to the
        // namesystem total, and all of it must be attributed to EC groups.
        long totalMissingBlocks = cluster.getNamesystem().getMissingBlocksCount();
        Long replicaMissingBlocks = (Long) mbs.getAttribute(replStateMBeanName, "MissingReplicatedBlocks");
        Long ecMissingBlocks = (Long) mbs.getAttribute(ecBlkGrpStateMBeanName, "MissingECBlockGroups");
        assertEquals("Unexpected total missing blocks!", expectedMissingBlockCount, totalMissingBlocks);
        assertEquals("Unexpected total missing blocks!", totalMissingBlocks, (replicaMissingBlocks + ecMissingBlocks));
        assertEquals("Unexpected total ec missing blocks!", expectedMissingBlockCount, ecMissingBlocks.longValue());
        // Corrupt blocks: same consistency checks for the corrupt counters.
        long totalCorruptBlocks = cluster.getNamesystem().getCorruptReplicaBlocks();
        Long replicaCorruptBlocks = (Long) mbs.getAttribute(replStateMBeanName, "CorruptReplicatedBlocks");
        Long ecCorruptBlocks = (Long) mbs.getAttribute(ecBlkGrpStateMBeanName, "CorruptECBlockGroups");
        assertEquals("Unexpected total corrupt blocks!", expectedCorruptBlockCount, totalCorruptBlocks);
        assertEquals("Unexpected total corrupt blocks!", totalCorruptBlocks, (replicaCorruptBlocks + ecCorruptBlocks));
        assertEquals("Unexpected total ec corrupt blocks!", expectedCorruptBlockCount, ecCorruptBlocks.longValue());
        // The corrupt-files listing must contain exactly the one file.
        String corruptFiles = (String) (mbs.getAttribute(namenodeMXBeanName, "CorruptFiles"));
        int numCorruptFiles = ((Object[]) JSON.parse(corruptFiles)).length;
        assertEquals(1, numCorruptFiles);
    } finally {
        if (fs != null) {
            fs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
303426.981112hadoop
/**
 * Exercises the raw DataTransferProtocol wire format against a live
 * datanode: wrong protocol version, wrong opcode, bad checksum parameters,
 * malformed packet headers, and out-of-range read requests. Each scenario
 * pre-loads the expected response bytes into recvBuf/recvOut and the
 * request bytes into sendBuf/sendOut, then sendRecvData() performs the
 * exchange and compares; its boolean argument indicates whether the
 * datanode is expected to drop the connection.
 *
 * @throws IOException on any I/O failure during the exchanges
 */
public void testDataTransferProtocol() throws IOException {
    Random random = new Random();
    int oneMil = 1024 * 1024;
    Path file = new Path("dataprotocol.dat");
    int numDataNodes = 1;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
        cluster.waitActive();
        datanode = cluster.getFileSystem().getDataNodeStats(DatanodeReportType.LIVE)[0];
        dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
        FileSystem fileSys = cluster.getFileSystem();
        // Create a small single-block file to read back later.
        int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
        DFSTestUtil.createFile(fileSys, file, fileLen, fileLen, fileSys.getDefaultBlockSize(file), fileSys.getDefaultReplication(file), 0L);
        final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
        final String poolId = firstBlock.getBlockPoolId();
        long newBlockId = firstBlock.getBlockId() + 1;
        recvBuf.reset();
        sendBuf.reset();
        // Scenario: stale protocol version -> connection dropped.
        recvOut.writeShort((short) (DataTransferProtocol.DATA_TRANSFER_VERSION - 1));
        sendOut.writeShort((short) (DataTransferProtocol.DATA_TRANSFER_VERSION - 1));
        sendRecvData("Wrong Version", true);
        // Scenario: correct version but invalid opcode -> dropped.
        sendBuf.reset();
        sendOut.writeShort((short) DataTransferProtocol.DATA_TRANSFER_VERSION);
        sendOut.writeByte(Op.WRITE_BLOCK.code - 1);
        sendRecvData("Wrong Op Code", true);
        // Scenario: WRITE_BLOCK with an invalid bytesPerChecksum (-1).
        sendBuf.reset();
        DataChecksum badChecksum = Mockito.spy(DEFAULT_CHECKSUM);
        Mockito.doReturn(-1).when(badChecksum).getBytesPerChecksum();
        writeBlock(poolId, newBlockId, badChecksum);
        recvBuf.reset();
        sendResponse(Status.ERROR, null, null, recvOut);
        sendRecvData("wrong bytesPerChecksum while writing", true);
        // Scenario: valid write setup but a packet with a negative data
        // length -> the pipeline acks ERROR.
        sendBuf.reset();
        recvBuf.reset();
        writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
        PacketHeader hdr = new PacketHeader(4, 0, 100, false, -1 - random.nextInt(oneMil), false);
        hdr.write(sendOut);
        sendResponse(Status.SUCCESS, "", null, recvOut);
        new PipelineAck(100, new int[] { PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.ERROR) }).write(recvOut);
        sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, true);
        // Scenario: writing a zero-length last packet succeeds.
        sendBuf.reset();
        recvBuf.reset();
        writeBlock(poolId, ++newBlockId, DEFAULT_CHECKSUM);
        hdr = new PacketHeader(8, 0, 100, true, 0, false);
        hdr.write(sendOut);
        sendOut.writeInt(0);
        sendOut.flush();
        sendResponse(Status.SUCCESS, "", null, recvOut);
        new PipelineAck(100, new int[] { PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS) }).write(recvOut);
        sendRecvData("Writing a zero len block blockid " + newBlockId, false);
        // Read-path scenarios against the real block of the created file.
        String bpid = cluster.getNamesystem().getBlockPoolId();
        ExtendedBlock blk = new ExtendedBlock(bpid, firstBlock.getLocalBlock());
        long blkid = blk.getBlockId();
        // Scenario: read a non-existent block id.
        sendBuf.reset();
        recvBuf.reset();
        blk.setBlockId(blkid - 1);
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong block ID " + newBlockId + " for read", false);
        // Scenario: negative start offset.
        sendBuf.reset();
        blk.setBlockId(blkid);
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", -1L, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Negative start-offset for read for block " + firstBlock.getBlockId(), false);
        // Scenario: start offset at end-of-block (past last byte).
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", fileLen, fileLen, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong start-offset for reading block " + firstBlock.getBlockId(), false);
        // Scenario: negative read length; the op itself is accepted
        // (SUCCESS + checksum info) before the read fails.
        recvBuf.reset();
        BlockOpResponseProto.newBuilder().setStatus(Status.SUCCESS).setReadOpChecksumInfo(ReadOpChecksumInfoProto.newBuilder().setChecksum(DataTransferProtoUtil.toProto(DEFAULT_CHECKSUM)).setChunkOffset(0L)).build().writeDelimitedTo(recvOut);
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, -1L - random.nextInt(oneMil), true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Negative length for reading block " + firstBlock.getBlockId(), false);
        // Scenario: length one byte past the block -> explicit error text.
        recvBuf.reset();
        sendResponse(Status.ERROR, null, "opReadBlock " + firstBlock + " received exception java.io.IOException:  " + "Offset 0 and length 4097 don't match block " + firstBlock + " ( blockLen 4096 )", recvOut);
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen + 1, true, CachingStrategy.newDefaultStrategy());
        sendRecvData("Wrong length for reading block " + firstBlock.getBlockId(), false);
        // Finally, a fully valid read of the whole block.
        sendBuf.reset();
        sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", 0L, fileLen, true, CachingStrategy.newDefaultStrategy());
        readFile(fileSys, file, fileLen);
    } finally {
        cluster.shutdown();
    }
}
302418.2510109hadoop
/**
 * Verifies that the DatanodeAdminMonitor keeps unhealthy (dead)
 * decommissioning nodes tracked/re-queued rather than letting them starve
 * healthy nodes: dead nodes stay DECOMMISSION_INPROGRESS while they have
 * blocks, healthy nodes still complete decommissioning, and once the last
 * file is deleted the dead nodes finally transition to DECOMMISSIONED.
 * The default monitor and the backoff monitor re-queue with different
 * pending/tracked counts, hence the instanceof branch below.
 *
 * @throws Exception on any assertion failure or cluster error
 */
public void testRequeueUnhealthyDecommissioningNodes() throws Exception {
    final int numLiveNodes = 3;
    final int numDeadNodes = 2;
    final int numNodes = numLiveNodes + numDeadNodes;
    final List<DatanodeDescriptor> liveNodes = new ArrayList<>();
    final Map<DatanodeDescriptor, MiniDFSCluster.DataNodeProperties> deadNodeProps = new HashMap<>();
    final ArrayList<DatanodeInfo> decommissionedNodes = new ArrayList<>();
    final Path filePath = new Path("/tmp/test");
    createClusterWithDeadNodesDecommissionInProgress(numLiveNodes, liveNodes, numDeadNodes, deadNodeProps, decommissionedNodes, filePath);
    final FSNamesystem namesystem = getCluster().getNamesystem();
    final BlockManager blockManager = namesystem.getBlockManager();
    final DatanodeManager datanodeManager = blockManager.getDatanodeManager();
    final DatanodeAdminManager decomManager = datanodeManager.getDatanodeAdminManager();
    // For a few seconds, repeatedly re-check: the dead nodes must stay
    // tracked (not pending) and remain DECOMMISSION_INPROGRESS.
    final Duration checkDuration = Duration.ofSeconds(5);
    Instant checkUntil = Instant.now().plus(checkDuration);
    while (Instant.now().isBefore(checkUntil)) {
        BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
        assertEquals("Unexpected number of decommissioning nodes queued in DatanodeAdminManager.", 0, decomManager.getNumPendingNodes());
        assertEquals("Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.", numDeadNodes, decomManager.getNumTrackedNodes());
        assertTrue("Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.", deadNodeProps.keySet().stream().allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
        Thread.sleep(500);
    }
    // Remove the blocks that keep the dead nodes decommissioning, then
    // start decommissioning two healthy nodes as well.
    getCluster().getFileSystem().delete(filePath, true);
    int numLiveDecommNodes = 2;
    final List<DatanodeDescriptor> liveDecommNodes = liveNodes.subList(0, numLiveDecommNodes);
    for (final DatanodeDescriptor liveNode : liveDecommNodes) {
        takeNodeOutofService(0, liveNode.getDatanodeUuid(), 0, decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
        decommissionedNodes.add(liveNode);
    }
    // Re-create a file so all nodes (including dead ones) hold blocks again.
    writeFile(getCluster().getFileSystem(), filePath, numNodes, 10);
    // Wait until dead nodes are tracked and live decommissioning nodes are
    // queued behind them.
    GenericTestUtils.waitFor(() -> decomManager.getNumTrackedNodes() == numDeadNodes && decomManager.getNumPendingNodes() == numLiveDecommNodes && liveDecommNodes.stream().allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)), 500, 30000);
    assertThat(liveDecommNodes).as("Check all live decommissioning nodes queued in DatanodeAdminManager").containsAll(decomManager.getPendingNodes());
    // The two monitor implementations re-queue dead nodes with different
    // intermediate pending counts; assert each behavior separately.
    if (this instanceof TestDecommissionWithBackoffMonitor) {
        BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
        assertEquals("DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.", 2, decomManager.getNumPendingNodes());
        assertEquals("DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.", 0, decomManager.getNumTrackedNodes());
    } else {
        BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
        assertEquals("DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.", 4, decomManager.getNumPendingNodes());
        assertEquals("DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.", 0, decomManager.getNumTrackedNodes());
        BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
        assertEquals("DatanodeAdminDefaultMonitor did not decommission live nodes as expected.", 2, decomManager.getNumPendingNodes());
        assertEquals("DatanodeAdminDefaultMonitor did not decommission live nodes as expected.", 0, decomManager.getNumTrackedNodes());
    }
    // Live nodes finish; dead nodes are still blocked by their replicas.
    assertTrue("Live nodes not DECOMMISSIONED as expected.", liveDecommNodes.stream().allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSIONED)));
    assertTrue("Dead nodes not DECOMMISSION_INPROGRESS as expected.", deadNodeProps.keySet().stream().allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
    assertThat(deadNodeProps.keySet()).as("Check all dead decommissioning nodes queued in DatanodeAdminManager").containsAll(decomManager.getPendingNodes());
    // Dead nodes must again remain tracked/in-progress for a few seconds.
    checkUntil = Instant.now().plus(checkDuration);
    while (Instant.now().isBefore(checkUntil)) {
        BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
        assertEquals("Unexpected number of decommissioning nodes queued in DatanodeAdminManager.", 0, decomManager.getNumPendingNodes());
        assertEquals("Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.", numDeadNodes, decomManager.getNumTrackedNodes());
        assertTrue("Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.", deadNodeProps.keySet().stream().allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
        Thread.sleep(500);
    }
    // After deleting the file there is nothing left to replicate, so the
    // dead nodes can finally be fully decommissioned.
    getCluster().getFileSystem().delete(filePath, true);
    GenericTestUtils.waitFor(() -> {
        try {
            BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
        } catch (ExecutionException | InterruptedException e) {
            LOG.warn("Exception running DatanodeAdminMonitor", e);
            return false;
        }
        return decomManager.getNumTrackedNodes() == 0 && decomManager.getNumPendingNodes() == 0 && deadNodeProps.keySet().stream().allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSIONED));
    }, 500, 30000);
}
303162.474108hadoop
/**
 * Verifies that files with open output streams survive NameNode restarts,
 * including files renamed (directly or via a parent-directory rename)
 * while their streams were open. After two restarts the streams' internal
 * "src" paths are patched via reflection to the renamed locations so
 * close() can complete lease recovery, and the block counts are checked.
 *
 * @throws IOException on filesystem errors
 * @throws NoSuchFieldException if DFSOutputStream no longer has the
 *     reflected "src"/"modifiers" fields
 * @throws IllegalAccessException if reflection access is denied
 */
public void testFileCreationNamenodeRestart() throws IOException, NoSuchFieldException, IllegalAccessException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
    if (simulatedStorage) {
        SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        final int nnport = cluster.getNameNodePort();
        // File 1: written, hflushed, then renamed while still open.
        Path file1 = new Path("/filestatus.dat");
        HdfsDataOutputStream stm = create(fs, file1, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
        assertEquals(file1 + " should be replicated to 1 datanode.", 1, stm.getCurrentBlockReplication());
        writeFile(stm, numBlocks * blockSize);
        stm.hflush();
        assertEquals(file1 + " should still be replicated to 1 datanode.", 1, stm.getCurrentBlockReplication());
        Path fileRenamed = new Path("/filestatusRenamed.dat");
        fs.rename(file1, fileRenamed);
        System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to " + fileRenamed);
        file1 = fileRenamed;
        // File 2: opened but nothing flushed before the restarts.
        Path file2 = new Path("/filestatus2.dat");
        FSDataOutputStream stm2 = createFile(fs, file2, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
        // Files 3 and 4: open streams whose parent directory gets renamed.
        Path file3 = new Path("/user/home/fullpath.dat");
        FSDataOutputStream stm3 = createFile(fs, file3, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
        Path file4 = new Path("/user/home/fullpath4.dat");
        FSDataOutputStream stm4 = createFile(fs, file4, 1);
        System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
        fs.mkdirs(new Path("/bin"));
        fs.rename(new Path("/user/home"), new Path("/bin"));
        Path file3new = new Path("/bin/home/fullpath.dat");
        System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to " + file3new);
        Path file4new = new Path("/bin/home/fullpath4.dat");
        System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to " + file4new);
        // Restart the NameNode twice, keeping the datanodes/data intact.
        cluster.shutdown(false, false);
        try {
            // Let the RPC client connection go idle and close; interruption
            // merely shortens the wait.
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
        cluster.waitActive();
        cluster.shutdown(false, false);
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // Point the open streams at the renamed paths by rewriting the
        // final DFSOutputStream "src" field via reflection.
        // NOTE(review): clearing FINAL through the "modifiers" field is a
        // JDK-internal trick that stopped working around Java 12 — confirm
        // the build's target JDK still supports it.
        DFSOutputStream dfstream = (DFSOutputStream) (stm.getWrappedStream());
        Field f = DFSOutputStream.class.getDeclaredField("src");
        Field modifiersField = Field.class.getDeclaredField("modifiers");
        modifiersField.setAccessible(true);
        modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
        f.setAccessible(true);
        f.set(dfstream, file1.toString());
        dfstream = (DFSOutputStream) (stm3.getWrappedStream());
        f.set(dfstream, file3new.toString());
        dfstream = (DFSOutputStream) (stm4.getWrappedStream());
        f.set(dfstream, file4new.toString());
        // Finish writing and close everything; close() must succeed against
        // the restarted NameNode.
        byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
        stm.write(buffer);
        stm.close();
        stm2.write(buffer);
        stm2.close();
        stm3.close();
        stm4.close();
        // Verify block counts: file1 has numBlocks + the final 1-byte write
        // (3 blocks total); file2 only has the single post-restart block.
        DFSClient client = fs.dfs;
        LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        System.out.println("locations = " + locations.locatedBlockCount());
        assertTrue("Error blocks were not cleaned up for file " + file1, locations.locatedBlockCount() == 3);
        locations = client.getNamenode().getBlockLocations(file2.toString(), 0, Long.MAX_VALUE);
        System.out.println("locations = " + locations.locatedBlockCount());
        assertTrue("Error blocks were not cleaned up for file " + file2, locations.locatedBlockCount() == 1);
    } finally {
        IOUtils.closeStream(fs);
        cluster.shutdown();
    }
}
303520.814104hadoop
/**
 * Tests setTimes() semantics on files and directories: -1/-2 sentinel
 * handling (leave a time unchanged / leave both unchanged), explicit atime
 * and mtime updates, failure on a non-existent path, and persistence of
 * the updated times across a NameNode restart.
 *
 * @throws IOException on any filesystem error (a datanode report is
 *     printed first to aid debugging)
 */
public void testTimes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    final int nnport = cluster.getNameNodePort();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = 1;
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        System.out.println("Creating testdir1 and testdir1/test1.dat.");
        Path dir1 = new Path("testdir1");
        Path file1 = new Path(dir1, "test1.dat");
        FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
        FileStatus stat = fileSys.getFileStatus(file1);
        // The file gets a non-zero atime at creation, before close.
        long atimeBeforeClose = stat.getAccessTime();
        String adate = dateForm.format(new Date(atimeBeforeClose));
        System.out.println("atime on " + file1 + " before close is " + adate + " (" + atimeBeforeClose + ")");
        assertTrue(atimeBeforeClose != 0);
        stm.close();
        stat = fileSys.getFileStatus(file1);
        long atime1 = stat.getAccessTime();
        long mtime1 = stat.getModificationTime();
        adate = dateForm.format(new Date(atime1));
        String mdate = dateForm.format(new Date(mtime1));
        System.out.println("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
        System.out.println("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
        assertTrue(atime1 != 0);
        // setTimes(-2, -2): neither mtime nor atime may change.
        fileSys.setTimes(file1, -2, -2);
        stat = fileSys.getFileStatus(file1);
        assertEquals(mtime1, stat.getModificationTime());
        assertEquals(atime1, stat.getAccessTime());
        // Directories do not get an access time by default.
        stat = fileSys.getFileStatus(dir1);
        long mdir1 = stat.getAccessTime();
        assertTrue(mdir1 == 0);
        // setTimes(-1, atime2): only atime changes (back-dated one day).
        long atime2 = atime1 - (24L * 3600L * 1000L);
        fileSys.setTimes(file1, -1, atime2);
        stat = fileSys.getFileStatus(file1);
        long atime3 = stat.getAccessTime();
        String adate3 = dateForm.format(new Date(atime3));
        System.out.println("new atime on " + file1 + " is " + adate3 + " (" + atime3 + ")");
        assertTrue(atime2 == atime3);
        assertTrue(mtime1 == stat.getModificationTime());
        // setTimes(mtime2, -1): only mtime changes (back-dated one hour).
        long mtime2 = mtime1 - (3600L * 1000L);
        fileSys.setTimes(file1, mtime2, -1);
        stat = fileSys.getFileStatus(file1);
        long mtime3 = stat.getModificationTime();
        String mdate3 = dateForm.format(new Date(mtime3));
        System.out.println("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3 + ")");
        assertTrue(atime2 == stat.getAccessTime());
        assertTrue(mtime2 == mtime3);
        // setTimes works on directories too, with explicit values.
        long mtime4 = Time.now() - (3600L * 1000L);
        long atime4 = Time.now();
        fileSys.setTimes(dir1, mtime4, atime4);
        stat = fileSys.getFileStatus(dir1);
        assertTrue("Not matching the modification times", mtime4 == stat.getModificationTime());
        assertTrue("Not matching the access times", atime4 == stat.getAccessTime());
        // setTimes on a missing path must throw FileNotFoundException.
        Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
        try {
            fileSys.setTimes(nonExistingDir, mtime4, atime4);
            fail("Expecting FileNotFoundException");
        } catch (FileNotFoundException e) {
            assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
        }
        // Restart the NameNode (same port, no format) and verify the set
        // times were persisted in the edit log / image.
        cluster.shutdown();
        try {
            // Let the RPC client connection go idle; interruption merely
            // shortens the wait.
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        System.out.println("Verifying times after cluster restart");
        stat = fileSys.getFileStatus(file1);
        assertTrue(atime2 == stat.getAccessTime());
        assertTrue(mtime3 == stat.getModificationTime());
        cleanupFile(fileSys, file1);
        cleanupFile(fileSys, dir1);
    } catch (IOException e) {
        // Dump the datanode state to help diagnose the failure, then rethrow.
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
304045.531107hadoop
/**
 * End-to-end test of the "verifyEC" debug command: checks the usage text and
 * argument validation, verifies healthy erasure-coded files of assorted
 * sizes, and exercises the error paths for corrupted block groups together
 * with the {@code -blockId} selector and the {@code -skipFailureBlocks} flag.
 */
public void testVerifyECCommand() throws Exception {
    // RS(3,2): three data units plus two parity units per block group.
    final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
    cluster = DFSTestUtil.setupCluster(conf, 6, 5, 0);
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    // With no arguments the command replies with its usage text and exit code 1.
    assertEquals("ret: 1, verifyEC -file <file> [-blockId <blk_Id>] " + "[-skipFailureBlocks]  -file Verify HDFS erasure coding on all block groups of the file." + "  -skipFailureBlocks specify will skip any block group failures during verify," + "  and continues verify all block groups of the file," + "  the default is not to skip failure blocks." + "  -blockId specify blk_Id to verify for a specific one block group.", runCmd(new String[] { "verifyEC" }));
    // Nonexistent, non-EC, directory, and still-open targets are all rejected.
    assertEquals("ret: 1, File /bar does not exist.", runCmd(new String[] { "verifyEC", "-file", "/bar" }));
    fs.create(new Path("/bar")).close();
    assertEquals("ret: 1, File /bar is not erasure coded.", runCmd(new String[] { "verifyEC", "-file", "/bar" }));
    final Path ecDir = new Path("/ec");
    fs.mkdir(ecDir, FsPermission.getDirDefault());
    fs.enableErasureCodingPolicy(ecPolicy.getName());
    fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
    assertEquals("ret: 1, File /ec is not a regular file.", runCmd(new String[] { "verifyEC", "-file", "/ec" }));
    fs.create(new Path(ecDir, "foo"));
    assertEquals("ret: 1, File /ec/foo is not closed.", runCmd(new String[] { "verifyEC", "-file", "/ec/foo" }));
    final short repl = 1;
    final long k = 1024;
    final long m = k * k;
    final long seed = 0x1234567L;
    // Healthy EC files of assorted sizes — from a partial cell (64 KB - 1)
    // up to several full stripes — should all verify as OK.
    DFSTestUtil.createFile(fs, new Path(ecDir, "foo_65535"), 65535, repl, seed);
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_65535" }).contains("All EC block group status: OK"));
    DFSTestUtil.createFile(fs, new Path(ecDir, "foo_256k"), 256 * k, repl, seed);
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_256k" }).contains("All EC block group status: OK"));
    DFSTestUtil.createFile(fs, new Path(ecDir, "foo_1m"), m, repl, seed);
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_1m" }).contains("All EC block group status: OK"));
    DFSTestUtil.createFile(fs, new Path(ecDir, "foo_2m"), 2 * m, repl, seed);
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_2m" }).contains("All EC block group status: OK"));
    DFSTestUtil.createFile(fs, new Path(ecDir, "foo_3m"), 3 * m, repl, seed);
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_3m" }).contains("All EC block group status: OK"));
    DFSTestUtil.createFile(fs, new Path(ecDir, "foo_5m"), 5 * m, repl, seed);
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_5m" }).contains("All EC block group status: OK"));
    // A 6 MB file with a 1 MB block size spans two block groups, so the
    // command reports one "Checking EC block group" line per group.
    // NOTE(review): this createFile overload appears to take (bufferLen,
    // fileLen, blockSize) — confirm against DFSTestUtil.
    DFSTestUtil.createFile(fs, new Path(ecDir, "foo_6m"), (int) k, 6 * m, m, repl, seed);
    assertEquals("ret: 0, Checking EC block group: blk_x;Status: OK" + "Checking EC block group: blk_x;Status: OK" + "All EC block group status: OK", runCmd(new String[] { "verifyEC", "-file", "/ec/foo_6m" }).replaceAll("blk_-[0-9]+", "blk_x;"));
    // Corrupt one internal block of a single-group file: overwrite the block
    // data with random bytes, delete its meta file and regenerate it with
    // computeMeta, so only the EC decode comparison can detect the damage.
    Path corruptFile = new Path(ecDir, "foo_corrupt");
    DFSTestUtil.createFile(fs, corruptFile, 5841961, repl, seed);
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, corruptFile);
    assertEquals(1, blocks.size());
    LocatedStripedBlock blockGroup = (LocatedStripedBlock) blocks.get(0);
    LocatedBlock[] indexedBlocks = StripedBlockUtil.parseStripedBlockGroup(blockGroup, ecPolicy.getCellSize(), ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits());
    LocatedBlock toCorruptLocatedBlock = indexedBlocks[0];
    ExtendedBlock toCorruptBlock = toCorruptLocatedBlock.getBlock();
    DataNode datanode = cluster.getDataNode(toCorruptLocatedBlock.getLocations()[0].getIpcPort());
    File blockFile = getBlockFile(datanode.getFSDataset(), toCorruptBlock.getBlockPoolId(), toCorruptBlock.getLocalBlock());
    File metaFile = getMetaFile(datanode.getFSDataset(), toCorruptBlock.getBlockPoolId(), toCorruptBlock.getLocalBlock());
    byte[] errorBytes = new byte[2097152];
    new Random(seed).nextBytes(errorBytes);
    FileUtils.writeByteArrayToFile(blockFile, errorBytes);
    metaFile.delete();
    runCmd(new String[] { "computeMeta", "-block", blockFile.getAbsolutePath(), "-out", metaFile.getAbsolutePath() });
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_corrupt" }).contains("Status: ERROR, message: EC compute result not match."));
    // -blockId restricts the verification to the named block group only.
    Path newFile = new Path(ecDir, "foo_new");
    DFSTestUtil.createFile(fs, newFile, (int) k, 6 * m, m, repl, seed);
    blocks = DFSTestUtil.getAllBlocks(fs, newFile);
    assertEquals(2, blocks.size());
    blockGroup = (LocatedStripedBlock) blocks.get(0);
    String blockName = blockGroup.getBlock().getBlockName();
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_new", "-blockId", blockName }).contains("ret: 0, Checking EC block group: " + blockName + "Status: OK"));
    // Corrupt the first of the two groups the same way as above.
    indexedBlocks = StripedBlockUtil.parseStripedBlockGroup(blockGroup, ecPolicy.getCellSize(), ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits());
    toCorruptLocatedBlock = indexedBlocks[0];
    toCorruptBlock = toCorruptLocatedBlock.getBlock();
    datanode = cluster.getDataNode(toCorruptLocatedBlock.getLocations()[0].getIpcPort());
    blockFile = getBlockFile(datanode.getFSDataset(), toCorruptBlock.getBlockPoolId(), toCorruptBlock.getLocalBlock());
    metaFile = getMetaFile(datanode.getFSDataset(), toCorruptBlock.getBlockPoolId(), toCorruptBlock.getLocalBlock());
    metaFile.delete();
    errorBytes = new byte[1048576];
    new Random(0x12345678L).nextBytes(errorBytes);
    FileUtils.writeByteArrayToFile(blockFile, errorBytes);
    runCmd(new String[] { "computeMeta", "-block", blockFile.getAbsolutePath(), "-out", metaFile.getAbsolutePath() });
    LocatedStripedBlock blockGroup2 = (LocatedStripedBlock) blocks.get(1);
    // With -skipFailureBlocks the command reports the corrupted first group
    // as ERROR, keeps going, and still verifies the healthy second group.
    assertTrue(runCmd(new String[] { "verifyEC", "-file", "/ec/foo_new", "-skipFailureBlocks" }).contains("ret: 1, Checking EC block group: " + blockGroup.getBlock().getBlockName() + "Status: ERROR, message: EC compute result not match." + "Checking EC block group: " + blockGroup2.getBlock().getBlockName() + "Status: OK"));
}
30691.436111hadoop
/**
 * Increments the "concurrent invocation" metric counter that corresponds to
 * the invoked method, dispatching on the method's simple name. Any method
 * without a dedicated counter below is folded into the generic
 * {@code concurrentOtherOps} counter.
 *
 * @param method the method whose concurrent invocation is being recorded
 */
public void incInvokedConcurrent(Method method) {
    // One case per tracked operation; each maps to its own metrics counter.
    switch(method.getName()) {
        case "setReplication":
            concurrentSetReplicationOps.incr();
            break;
        case "setPermission":
            concurrentSetPermissionOps.incr();
            break;
        case "setOwner":
            concurrentSetOwnerOps.incr();
            break;
        case "rename":
            concurrentRenameOps.incr();
            break;
        case "rename2":
            concurrentRename2Ops.incr();
            break;
        case "delete":
            concurrentDeleteOps.incr();
            break;
        case "mkdirs":
            concurrentMkdirsOps.incr();
            break;
        case "renewLease":
            concurrentRenewLeaseOps.incr();
            break;
        case "getListing":
            concurrentGetListingOps.incr();
            break;
        case "getFileInfo":
            concurrentGetFileInfoOps.incr();
            break;
        case "getStats":
            concurrentGetStatsOps.incr();
            break;
        case "getDatanodeReport":
            concurrentGetDatanodeReportOps.incr();
            break;
        case "setSafeMode":
            concurrentSetSafeModeOps.incr();
            break;
        case "restoreFailedStorage":
            concurrentRestoreFailedStorageOps.incr();
            break;
        case "saveNamespace":
            concurrentSaveNamespaceOps.incr();
            break;
        case "rollEdits":
            concurrentRollEditsOps.incr();
            break;
        case "refreshNodes":
            concurrentRefreshNodesOps.incr();
            break;
        case "finalizeUpgrade":
            concurrentFinalizeUpgradeOps.incr();
            break;
        case "rollingUpgrade":
            concurrentRollingUpgradeOps.incr();
            break;
        case "metaSave":
            concurrentMetaSaveOps.incr();
            break;
        case "listCorruptFileBlocks":
            concurrentListCorruptFileBlocksOps.incr();
            break;
        case "setBalancerBandwidth":
            concurrentSetBalancerBandwidthOps.incr();
            break;
        case "getContentSummary":
            concurrentGetContentSummaryOps.incr();
            break;
        case "modifyAclEntries":
            concurrentModifyAclEntriesOps.incr();
            break;
        case "removeAclEntries":
            concurrentRemoveAclEntriesOps.incr();
            break;
        case "removeDefaultAcl":
            concurrentRemoveDefaultAclOps.incr();
            break;
        case "removeAcl":
            concurrentRemoveAclOps.incr();
            break;
        case "setAcl":
            concurrentSetAclOps.incr();
            break;
        case "setXAttr":
            concurrentSetXAttrOps.incr();
            break;
        case "removeXAttr":
            concurrentRemoveXAttrOps.incr();
            break;
        case "getCurrentEditLogTxid":
            concurrentGetCurrentEditLogTxidOps.incr();
            break;
        case "getReplicatedBlockStats":
            concurrentGetReplicatedBlockStatsOps.incr();
            break;
        case "setQuota":
            concurrentSetQuotaOps.incr();
            break;
        case "getQuotaUsage":
            concurrentGetQuotaUsageOps.incr();
            break;
        case "getSlowDatanodeReport":
            concurrentGetSlowDatanodeReportOps.incr();
            break;
        default:
            // No dedicated counter for this method; record it as "other".
            concurrentOtherOps.incr();
    }
}
302943.235111hadoop
/**
 * Tests LineRecordReader with a multi-byte custom delimiter on uncompressed
 * input, checking record positions (keys) and value lengths across split
 * boundaries, including delimiters that straddle buffer and split edges.
 *
 * @throws Exception if reader setup or input-file creation fails
 */
public void testUncompressedInputCustomDelimiterPosValue() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("io.file.buffer.size", 10);
    conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
    String inputData = "abcdefghij++kl++mno";
    Path inputFile = createInputFile(conf, inputData);
    String delimiter = "++";
    byte[] recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
    int splitLength = 15;
    // First split covers the first two records plus the start of the third;
    // a reader always finishes the record that begins inside its split.
    FileSplit split = new FileSplit(inputFile, 0, splitLength, (String[]) null);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    LineRecordReader reader = new LineRecordReader(recordDelimiterBytes);
    reader.initialize(split, context);
    assertTrue("Expected record got nothing", reader.nextKeyValue());
    LongWritable key = reader.getCurrentKey();
    Text value = reader.getCurrentValue();
    assertEquals("Wrong length for record value", 10, value.getLength());
    assertEquals("Wrong position after record read", 0, key.get());
    assertTrue("Expected record got nothing", reader.nextKeyValue());
    assertEquals("Wrong length for record value", 2, value.getLength());
    assertEquals("Wrong position after record read", 12, key.get());
    assertTrue("Expected record got nothing", reader.nextKeyValue());
    assertEquals("Wrong length for record value", 3, value.getLength());
    assertEquals("Wrong position after record read", 16, key.get());
    assertFalse(reader.nextKeyValue());
    assertEquals("Wrong position after record read", 19, key.get());
    // After a failed read the reader's current key is reset to null.
    key = reader.getCurrentKey();
    assertNull("Unexpected key returned", key);
    reader.close();
    // Second split starts mid-record, so its records were already consumed
    // by the first split's reader and it must return nothing.
    split = new FileSplit(inputFile, splitLength, inputData.length() - splitLength, (String[]) null);
    reader = new LineRecordReader(recordDelimiterBytes);
    reader.initialize(split, context);
    assertFalse("Unexpected record returned", reader.nextKeyValue());
    key = reader.getCurrentKey();
    assertNull("Unexpected key returned", key);
    reader.close();
    // Input where a lone '+' appears inside a record and the split boundary
    // falls inside the first record.
    inputData = "abcd+efgh++ijk++mno";
    inputFile = createInputFile(conf, inputData);
    splitLength = 5;
    split = new FileSplit(inputFile, 0, splitLength, (String[]) null);
    reader = new LineRecordReader(recordDelimiterBytes);
    reader.initialize(split, context);
    assertTrue("Expected record got nothing", reader.nextKeyValue());
    key = reader.getCurrentKey();
    value = reader.getCurrentValue();
    assertEquals("Wrong position after record read", 0, key.get());
    assertEquals("Wrong length for record value", 9, value.getLength());
    assertFalse(reader.nextKeyValue());
    assertEquals("Wrong position after record read", 11, key.get());
    key = reader.getCurrentKey();
    assertNull("Unexpected key returned", key);
    reader.close();
    split = new FileSplit(inputFile, splitLength, inputData.length() - splitLength, (String[]) null);
    reader = new LineRecordReader(recordDelimiterBytes);
    reader.initialize(split, context);
    assertTrue("Expected record got nothing", reader.nextKeyValue());
    key = reader.getCurrentKey();
    value = reader.getCurrentValue();
    assertEquals("Wrong position after record read", 11, key.get());
    assertEquals("Wrong length for record value", 3, value.getLength());
    assertTrue("Expected record got nothing", reader.nextKeyValue());
    assertEquals("Wrong position after record read", 16, key.get());
    assertEquals("Wrong length for record value", 3, value.getLength());
    assertFalse(reader.nextKeyValue());
    assertEquals("Wrong position after record read", 19, key.get());
    reader.close();
    // Sweep every buffer size x split size combination so the 3-byte "|+|"
    // delimiter lands on every possible buffer and split boundary. Records
    // start at positions 0, 12 and 20 of the input.
    inputData = "abcd|efgh|+|ij|kl|+|mno|pqr";
    inputFile = createInputFile(conf, inputData);
    delimiter = "|+|";
    recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8);
    for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
        for (int splitSize = 1; splitSize < inputData.length(); splitSize++) {
            // Track where the next record should start in inputData.
            int keyPosition = 0;
            conf.setInt("io.file.buffer.size", bufferSize);
            // FIX: the split length must vary with splitSize; it previously
            // used bufferSize, which left splitSize unused and made every
            // inner-loop iteration repeat the identical scenario.
            split = new FileSplit(inputFile, 0, splitSize, (String[]) null);
            reader = new LineRecordReader(recordDelimiterBytes);
            reader.initialize(split, context);
            // The first record always belongs to the split starting at 0.
            assertTrue("Expected record got nothing", reader.nextKeyValue());
            key = reader.getCurrentKey();
            value = reader.getCurrentValue();
            assertTrue("abcd|efgh".equals(value.toString()));
            assertEquals("Wrong position after record read", keyPosition, key.get());
            keyPosition = 12;
            // Later records are read only when they start inside the split.
            if (reader.nextKeyValue()) {
                assertTrue("ij|kl".equals(value.toString()));
                assertEquals("Wrong position after record read", keyPosition, key.get());
                keyPosition = 20;
            }
            if (reader.nextKeyValue()) {
                assertTrue("mno|pqr".equals(value.toString()));
                assertEquals("Wrong position after record read", keyPosition, key.get());
                keyPosition = inputData.length();
            }
            assertFalse("Unexpected record returned", reader.nextKeyValue());
            assertEquals("Wrong position after record read", keyPosition, key.get());
            key = reader.getCurrentKey();
            assertNull("Unexpected key returned", key);
            reader.close();
        }
    }
}
304148.9299hadoop
/**
 * Runs two in-memory merges (each spilling one on-disk segment) followed by
 * an on-disk merge of the resulting segments, verifying the merged key/value
 * ordering and that the merge manager is fully drained after close().
 *
 * @throws Throwable on any merge or verification failure
 */
public void testInMemoryAndOnDiskMerger() throws Throwable {
    JobID jobId = new JobID("a", 0);
    TaskAttemptID reduceId1 = new TaskAttemptID(new TaskID(jobId, TaskType.REDUCE, 0), 0);
    TaskAttemptID mapId1 = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 1), 0);
    TaskAttemptID mapId2 = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 2), 0);
    LocalDirAllocator lda = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    MergeManagerImpl<Text, Text> mergeManager = new MergeManagerImpl<Text, Text>(reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, null, null, new Progress(), new MROutputFiles());
    // Two map outputs: map1 carries apple/carrot, map2 carries banana.
    Map<String, String> map1 = new TreeMap<String, String>();
    map1.put("apple", "disgusting");
    map1.put("carrot", "delicious");
    Map<String, String> map2 = new TreeMap<String, String>();
    // FIX: populate map2 (this was mistakenly map1.put), so the second
    // in-memory map output actually carries the "banana" record instead of
    // being empty.
    map2.put("banana", "pretty good");
    byte[] mapOutputBytes1 = writeMapOutput(jobConf, map1);
    byte[] mapOutputBytes2 = writeMapOutput(jobConf, map2);
    InMemoryMapOutput<Text, Text> mapOutput1 = new InMemoryMapOutput<Text, Text>(jobConf, mapId1, mergeManager, mapOutputBytes1.length, null, true);
    InMemoryMapOutput<Text, Text> mapOutput2 = new InMemoryMapOutput<Text, Text>(jobConf, mapId2, mergeManager, mapOutputBytes2.length, null, true);
    System.arraycopy(mapOutputBytes1, 0, mapOutput1.getMemory(), 0, mapOutputBytes1.length);
    System.arraycopy(mapOutputBytes2, 0, mapOutput2.getMemory(), 0, mapOutputBytes2.length);
    // First in-memory merge spills one combined segment to disk.
    MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> inMemoryMerger = mergeManager.createInMemoryMerger();
    List<InMemoryMapOutput<Text, Text>> mapOutputs1 = new ArrayList<InMemoryMapOutput<Text, Text>>();
    mapOutputs1.add(mapOutput1);
    mapOutputs1.add(mapOutput2);
    inMemoryMerger.merge(mapOutputs1);
    Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size());
    // Second pair of map outputs with the same keys but different values.
    TaskAttemptID reduceId2 = new TaskAttemptID(new TaskID(jobId, TaskType.REDUCE, 3), 0);
    TaskAttemptID mapId3 = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 4), 0);
    TaskAttemptID mapId4 = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 5), 0);
    Map<String, String> map3 = new TreeMap<String, String>();
    map3.put("apple", "awesome");
    map3.put("carrot", "amazing");
    Map<String, String> map4 = new TreeMap<String, String>();
    map4.put("banana", "bla");
    byte[] mapOutputBytes3 = writeMapOutput(jobConf, map3);
    byte[] mapOutputBytes4 = writeMapOutput(jobConf, map4);
    InMemoryMapOutput<Text, Text> mapOutput3 = new InMemoryMapOutput<Text, Text>(jobConf, mapId3, mergeManager, mapOutputBytes3.length, null, true);
    InMemoryMapOutput<Text, Text> mapOutput4 = new InMemoryMapOutput<Text, Text>(jobConf, mapId4, mergeManager, mapOutputBytes4.length, null, true);
    System.arraycopy(mapOutputBytes3, 0, mapOutput3.getMemory(), 0, mapOutputBytes3.length);
    System.arraycopy(mapOutputBytes4, 0, mapOutput4.getMemory(), 0, mapOutputBytes4.length);
    MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> inMemoryMerger2 = mergeManager.createInMemoryMerger();
    List<InMemoryMapOutput<Text, Text>> mapOutputs2 = new ArrayList<InMemoryMapOutput<Text, Text>>();
    mapOutputs2.add(mapOutput3);
    mapOutputs2.add(mapOutput4);
    inMemoryMerger2.merge(mapOutputs2);
    Assert.assertEquals(2, mergeManager.onDiskMapOutputs.size());
    // Each on-disk segment is individually sorted by key.
    List<CompressAwarePath> paths = new ArrayList<CompressAwarePath>();
    Iterator<CompressAwarePath> iterator = mergeManager.onDiskMapOutputs.iterator();
    List<String> keys = new ArrayList<String>();
    List<String> values = new ArrayList<String>();
    while (iterator.hasNext()) {
        CompressAwarePath next = iterator.next();
        readOnDiskMapOutput(jobConf, fs, next, keys, values);
        paths.add(next);
    }
    assertThat(keys).isEqualTo(Arrays.asList("apple", "banana", "carrot", "apple", "banana", "carrot"));
    assertThat(values).isEqualTo(Arrays.asList("awesome", "bla", "amazing", "disgusting", "pretty good", "delicious"));
    mergeManager.close();
    // On-disk merge of the two segments produces one fully-sorted segment
    // with duplicate keys adjacent.
    mergeManager = new MergeManagerImpl<Text, Text>(reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null, null, null, new Progress(), new MROutputFiles());
    MergeThread<CompressAwarePath, Text, Text> onDiskMerger = mergeManager.createOnDiskMerger();
    onDiskMerger.merge(paths);
    Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size());
    keys = new ArrayList<String>();
    values = new ArrayList<String>();
    readOnDiskMapOutput(jobConf, fs, mergeManager.onDiskMapOutputs.iterator().next(), keys, values);
    assertThat(keys).isEqualTo(Arrays.asList("apple", "apple", "banana", "banana", "carrot", "carrot"));
    assertThat(values).isEqualTo(Arrays.asList("awesome", "disgusting", "pretty good", "bla", "amazing", "delicious"));
    mergeManager.close();
    // After close() no outputs may remain queued anywhere.
    Assert.assertEquals(0, mergeManager.inMemoryMapOutputs.size());
    Assert.assertEquals(0, mergeManager.inMemoryMergedMapOutputs.size());
    Assert.assertEquals(0, mergeManager.onDiskMapOutputs.size());
}
302899.292291hadoop
/**
 * Entry point for the NameNode benchmark. Parses the command line, prints
 * the effective configuration, runs the requested operation (createWrite,
 * openRead, rename or delete) against DFS, and reports the duration and the
 * number of exceptions encountered. Exits the JVM with -1 on bad arguments.
 *
 * @param args see the usage string below for the recognized flags
 * @throws IOException if DFS access fails
 */
public static void main(String[] args) throws IOException {
    String version = "NameNodeBenchmark.0.3";
    System.out.println(version);
    int bytesPerChecksum = -1;
    String usage = "Usage: nnbench " + "  -operation <one of createWrite, openRead, rename, or delete>\n " + "  -baseDir <base output/input DFS path>\n " + "  -startTime <time to start, given in seconds from the epoch>\n" + "  -numFiles <number of files to create>\n " + "  -replicationFactorPerFile <Replication factor for the files, default is 1>\n" + "  -blocksPerFile <number of blocks to create per file>\n" + "  [-bytesPerBlock <number of bytes to write to each block, default is 1>]\n" + "  [-bytesPerChecksum <value for io.bytes.per.checksum>]\n" + "Note: bytesPerBlock MUST be a multiple of bytesPerChecksum\n";
    String operation = null;
    // Flag parsing: each option consumes the following argument; any
    // unrecognized token aborts with the usage text.
    for (int i = 0; i < args.length; i++) {
        if (args[i].equals("-baseDir")) {
            baseDir = new Path(args[++i]);
        } else if (args[i].equals("-numFiles")) {
            numFiles = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-blocksPerFile")) {
            blocksPerFile = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-bytesPerBlock")) {
            bytesPerBlock = Long.parseLong(args[++i]);
        } else if (args[i].equals("-bytesPerChecksum")) {
            bytesPerChecksum = Integer.parseInt(args[++i]);
        } else if (args[i].equals("-replicationFactorPerFile")) {
            replicationFactorPerFile = Short.parseShort(args[++i]);
        } else if (args[i].equals("-startTime")) {
            // Seconds from the epoch on the command line, milliseconds internally.
            startTime = Long.parseLong(args[++i]) * 1000;
        } else if (args[i].equals("-operation")) {
            operation = args[++i];
        } else {
            System.out.println(usage);
            System.exit(-1);
        }
    }
    bytesPerFile = bytesPerBlock * blocksPerFile;
    JobConf jobConf = new JobConf(new Configuration(), NNBench.class);
    // Fall back to the configured checksum size when not given on the command line.
    if (bytesPerChecksum < 0) {
        bytesPerChecksum = jobConf.getInt("io.bytes.per.checksum", 512);
    }
    jobConf.set("io.bytes.per.checksum", Integer.toString(bytesPerChecksum));
    System.out.println("Inputs: ");
    System.out.println("   operation: " + operation);
    System.out.println("   baseDir: " + baseDir);
    System.out.println("   startTime: " + startTime);
    System.out.println("   numFiles: " + numFiles);
    System.out.println("   replicationFactorPerFile: " + replicationFactorPerFile);
    System.out.println("   blocksPerFile: " + blocksPerFile);
    System.out.println("   bytesPerBlock: " + bytesPerBlock);
    System.out.println("   bytesPerChecksum: " + bytesPerChecksum);
    // Validate inputs. FIX: bytesPerChecksum <= 0 is rejected explicitly
    // (and, via short-circuit, before the modulo) so an explicit
    // "-bytesPerChecksum 0" prints the usage instead of crashing with an
    // ArithmeticException on the divisibility check.
    if (operation == null || baseDir == null || numFiles < 1 || blocksPerFile < 1 || bytesPerBlock < 0 || bytesPerChecksum <= 0 || bytesPerBlock % bytesPerChecksum != 0) {
        System.err.println(usage);
        System.exit(-1);
    }
    fileSys = FileSystem.get(jobConf);
    // Each host works in its own sub-directory so concurrent runs don't collide.
    String uniqueId = java.net.InetAddress.getLocalHost().getHostName();
    taskDir = new Path(baseDir, uniqueId);
    // Shared write buffer, capped at 32 KB.
    buffer = new byte[(int) Math.min(bytesPerFile, 32768L)];
    Date execTime;
    Date endTime;
    long duration;
    int exceptions = 0;
    // Wait for the configured common start time before doing any work.
    barrier();
    execTime = new Date();
    System.out.println("Job started: " + startTime);
    if (operation.equals("createWrite")) {
        if (!fileSys.mkdirs(taskDir)) {
            throw new IOException("Mkdirs failed to create " + taskDir.toString());
        }
        exceptions = createWrite();
    } else if (operation.equals("openRead")) {
        exceptions = openRead();
    } else if (operation.equals("rename")) {
        exceptions = rename();
    } else if (operation.equals("delete")) {
        exceptions = delete();
    } else {
        System.err.println(usage);
        System.exit(-1);
    }
    endTime = new Date();
    System.out.println("Job ended: " + endTime);
    duration = (endTime.getTime() - execTime.getTime()) / 1000;
    System.out.println("The " + operation + " job took " + duration + " seconds.");
    System.out.println("The job recorded " + exceptions + " exceptions.");
}
303144.541597hadoop
/**
 * Parses the command line, configures and submits the sort job, and waits
 * for it to complete.
 *
 * @param args the command-line arguments (see printUsage() for the flags)
 * @return 0 if the job succeeds, 1 if it fails, or printUsage()'s return
 *         value on a malformed command line
 * @throws Exception if job setup, sampling, or submission fails
 */
public int run(String[] args) throws Exception {
    Configuration conf = getConf();
    JobClient client = new JobClient(conf);
    ClusterStatus cluster = client.getClusterStatus();
    // Default reducer count: roughly 90% of the cluster's reduce capacity,
    // unless REDUCES_PER_HOST overrides it on a per-tracker basis.
    int reduceTasks = (int) (cluster.getMaxReduceTasks() * 0.9);
    String reducesPerHost = conf.get(REDUCES_PER_HOST);
    if (reducesPerHost != null) {
        reduceTasks = cluster.getTaskTrackers() * Integer.parseInt(reducesPerHost);
    }
    Class<? extends InputFormat> inFormat = SequenceFileInputFormat.class;
    Class<? extends OutputFormat> outFormat = SequenceFileOutputFormat.class;
    Class<? extends WritableComparable> keyClass = BytesWritable.class;
    Class<? extends Writable> valueClass = BytesWritable.class;
    List<String> remaining = new ArrayList<String>();
    InputSampler.Sampler<K, V> sampler = null;
    for (int i = 0; i < args.length; ++i) {
        try {
            if ("-r".equals(args[i])) {
                reduceTasks = Integer.parseInt(args[++i]);
            } else if ("-inFormat".equals(args[i])) {
                inFormat = Class.forName(args[++i]).asSubclass(InputFormat.class);
            } else if ("-outFormat".equals(args[i])) {
                outFormat = Class.forName(args[++i]).asSubclass(OutputFormat.class);
            } else if ("-outKey".equals(args[i])) {
                keyClass = Class.forName(args[++i]).asSubclass(WritableComparable.class);
            } else if ("-outValue".equals(args[i])) {
                valueClass = Class.forName(args[++i]).asSubclass(Writable.class);
            } else if ("-totalOrder".equals(args[i])) {
                // -totalOrder <sample fraction> <num samples> <max splits>
                double sampleFraction = Double.parseDouble(args[++i]);
                int sampleCount = Integer.parseInt(args[++i]);
                int splitCap = Integer.parseInt(args[++i]);
                if (0 >= splitCap) {
                    splitCap = Integer.MAX_VALUE;
                }
                sampler = new InputSampler.RandomSampler<K, V>(sampleFraction, sampleCount, splitCap);
            } else {
                // Not a flag: treat it as a positional (input/output) argument.
                remaining.add(args[i]);
            }
        } catch (NumberFormatException except) {
            System.out.println("ERROR: Integer expected instead of " + args[i]);
            return printUsage();
        } catch (ArrayIndexOutOfBoundsException except) {
            System.out.println("ERROR: Required parameter missing from " + args[i - 1]);
            return printUsage();
        }
    }
    job = Job.getInstance(conf);
    job.setJobName("sorter");
    job.setJarByClass(Sort.class);
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);
    job.setNumReduceTasks(reduceTasks);
    job.setInputFormatClass(inFormat);
    job.setOutputFormatClass(outFormat);
    job.setOutputKeyClass(keyClass);
    job.setOutputValueClass(valueClass);
    // Exactly two positional arguments are expected: <input> <output>.
    if (remaining.size() != 2) {
        System.out.println("ERROR: Wrong number of parameters: " + remaining.size() + " instead of 2.");
        return printUsage();
    }
    FileInputFormat.setInputPaths(job, remaining.get(0));
    FileOutputFormat.setOutputPath(job, new Path(remaining.get(1)));
    if (sampler != null) {
        // Sample the input and distribute the resulting partition file via
        // the distributed cache so every task can load it.
        System.out.println("Sampling input to effect total-order sort...");
        job.setPartitionerClass(TotalOrderPartitioner.class);
        Path inputDir = FileInputFormat.getInputPaths(job)[0];
        FileSystem fs = inputDir.getFileSystem(conf);
        inputDir = inputDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
        Path partitionFile = new Path(inputDir, "_sortPartitioning");
        TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
        InputSampler.<K, V>writePartitionFile(job, sampler);
        URI partitionUri = new URI(partitionFile.toString() + "#" + "_sortPartitioning");
        job.addCacheFile(partitionUri);
    }
    System.out.println("Running on " + cluster.getTaskTrackers() + " nodes to sort from " + FileInputFormat.getInputPaths(job)[0] + " into " + FileOutputFormat.getOutputPath(job) + " with " + reduceTasks + " reduces.");
    Date jobStart = new Date();
    System.out.println("Job started: " + jobStart);
    boolean succeeded = job.waitForCompletion(true);
    Date jobEnd = new Date();
    System.out.println("Job ended: " + jobEnd);
    System.out.println("The job took " + (jobEnd.getTime() - jobStart.getTime()) / 1000 + " seconds.");
    return succeeded ? 0 : 1;
}
302647.272116hadoop
/**
 * Verifies how the -p option and its attribute flags map onto
 * DistCpOptions.shouldPreserve(), both through the parser and through the
 * DistCpOptions.Builder API.
 */
public void testPreserve() {
    // Without -p nothing is preserved.
    DistCpOptions options = OptionsParser.parse(new String[] { "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    checkPreserve(options, new FileAttribute[0], new FileAttribute[] { FileAttribute.BLOCKSIZE, FileAttribute.REPLICATION, FileAttribute.PERMISSION, FileAttribute.USER, FileAttribute.GROUP, FileAttribute.CHECKSUMTYPE });
    // Bare -p (with a source listing) enables the default attribute set,
    // which does not include ACLs or xattrs.
    options = OptionsParser.parse(new String[] { "-p", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    checkPreserve(options, new FileAttribute[] { FileAttribute.BLOCKSIZE, FileAttribute.REPLICATION, FileAttribute.PERMISSION, FileAttribute.USER, FileAttribute.GROUP, FileAttribute.CHECKSUMTYPE }, new FileAttribute[] { FileAttribute.ACL, FileAttribute.XATTR });
    // Bare -p with positional source paths behaves identically.
    options = OptionsParser.parse(new String[] { "-p", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    checkPreserve(options, new FileAttribute[] { FileAttribute.BLOCKSIZE, FileAttribute.REPLICATION, FileAttribute.PERMISSION, FileAttribute.USER, FileAttribute.GROUP, FileAttribute.CHECKSUMTYPE }, new FileAttribute[] { FileAttribute.ACL, FileAttribute.XATTR });
    // -pbr selects only block size and replication.
    options = OptionsParser.parse(new String[] { "-pbr", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    checkPreserve(options, new FileAttribute[] { FileAttribute.BLOCKSIZE, FileAttribute.REPLICATION }, new FileAttribute[] { FileAttribute.PERMISSION, FileAttribute.USER, FileAttribute.GROUP, FileAttribute.CHECKSUMTYPE, FileAttribute.ACL, FileAttribute.XATTR });
    // -pbrgup adds permission, user and group.
    options = OptionsParser.parse(new String[] { "-pbrgup", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    checkPreserve(options, new FileAttribute[] { FileAttribute.BLOCKSIZE, FileAttribute.REPLICATION, FileAttribute.PERMISSION, FileAttribute.USER, FileAttribute.GROUP }, new FileAttribute[] { FileAttribute.CHECKSUMTYPE, FileAttribute.ACL, FileAttribute.XATTR });
    // -pbrgupcaxt enables every attribute, including ACLs, xattrs and times.
    options = OptionsParser.parse(new String[] { "-pbrgupcaxt", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    checkPreserve(options, new FileAttribute[] { FileAttribute.BLOCKSIZE, FileAttribute.REPLICATION, FileAttribute.PERMISSION, FileAttribute.USER, FileAttribute.GROUP, FileAttribute.CHECKSUMTYPE, FileAttribute.ACL, FileAttribute.XATTR, FileAttribute.TIMES }, new FileAttribute[0]);
    // -pc selects only the checksum type.
    options = OptionsParser.parse(new String[] { "-pc", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    checkPreserve(options, new FileAttribute[] { FileAttribute.CHECKSUMTYPE }, new FileAttribute[] { FileAttribute.BLOCKSIZE, FileAttribute.REPLICATION, FileAttribute.PERMISSION, FileAttribute.USER, FileAttribute.GROUP, FileAttribute.ACL, FileAttribute.XATTR });
    // The default set's size matches the default switch string minus two.
    options = OptionsParser.parse(new String[] { "-p", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    Assert.assertEquals(DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.length() - 2, options.getPreserveAttributes().size());
    // An unknown attribute letter must be rejected by the parser.
    try {
        OptionsParser.parse(new String[] { "-pabcd", "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target" });
        Assert.fail("Invalid preserve attribute");
    } catch (NoSuchElementException ignore) {
    }
    // Builder API: preserve() is additive and idempotent.
    Builder builder = new DistCpOptions.Builder(new Path("hdfs://localhost:8020/source/first"), new Path("hdfs://localhost:8020/target/"));
    Assert.assertFalse(builder.build().shouldPreserve(FileAttribute.PERMISSION));
    builder.preserve(FileAttribute.PERMISSION);
    Assert.assertTrue(builder.build().shouldPreserve(FileAttribute.PERMISSION));
    builder.preserve(FileAttribute.PERMISSION);
    Assert.assertTrue(builder.build().shouldPreserve(FileAttribute.PERMISSION));
}

/**
 * Asserts that shouldPreserve() returns true for every attribute in
 * {@code expectedOn} and false for every attribute in {@code expectedOff}.
 */
private static void checkPreserve(DistCpOptions options, FileAttribute[] expectedOn, FileAttribute[] expectedOff) {
    for (FileAttribute attribute : expectedOn) {
        Assert.assertTrue(options.shouldPreserve(attribute));
    }
    for (FileAttribute attribute : expectedOff) {
        Assert.assertFalse(options.shouldPreserve(attribute));
    }
}
302068.1820104hadoop
/**
 * Parses a single log line and folds the information it carries into the two
 * output maps: {@code jobMetas} tracks jobs still running (keyed by
 * application id) and {@code skylineRecords} accumulates finished-job
 * resource skylines (keyed by recurrence id).
 *
 * <p>Lines that do not have at least five comma-separated fields, or whose
 * fifth field does not match {@code FILTER_PATTERN}, are silently ignored.
 * When an event of a recognized type is present but its payload does not
 * match the expected pattern, a {@link DataFieldNotFoundException} is thrown
 * with the payload text.
 *
 * @param logLine        one raw log line
 * @param jobMetas       in-progress job metadata, updated in place
 * @param skylineRecords completed skylines, appended to on job finish
 * @throws DataFieldNotFoundException if a recognized event's payload cannot
 *         be parsed
 * @throws ParseException if the timestamp field cannot be parsed
 */
public final void parseLine(final String logLine, final Map<String, JobMetaData> jobMetas, final Map<RecurrenceId, List<ResourceSkyline>> skylineRecords) throws DataFieldNotFoundException, ParseException {
    // Split into at most 5 fields; field [1] is the timestamp and field [4]
    // carries the event message.
    final String[] splits = logLine.split(",", 5);
    if (splits.length < 5) {
        return;
    }
    final Matcher jobEventMatcher = FILTER_PATTERN.matcher(splits[4]);
    if (!jobEventMatcher.find()) {
        return;
    }
    final long date = PARSERUTIL.stringToUnixTimestamp(splits[1]);
    // Everything after the first three whitespace-separated tokens of the
    // message is the event payload.
    final String tail = splits[4].split("\\s+", 4)[3];
    switch(jobEventMatcher.group(1)) {
        case "Submit Application Request":
            {
                // New application submitted: start tracking it from this time.
                final Matcher appIdMatch = SUBMISSION_PATTERN.matcher(tail);
                if (appIdMatch.find()) {
                    final String appId = appIdMatch.group(1);
                    jobMetas.put(appId, new JobMetaData(date));
                } else {
                    throw new DataFieldNotFoundException(tail);
                }
                break;
            }
        case "AM Allocated Container":
            {
                // A container was allocated; record its start time on the
                // owning job (if the job is being tracked).
                final Matcher containerEventMatcher = CONTAINER_EVENT_PATTERN.matcher(tail);
                if (containerEventMatcher.find()) {
                    final String appId = containerEventMatcher.group(1);
                    final String containerId = containerEventMatcher.group(2);
                    final JobMetaData appMeta = jobMetas.get(appId);
                    if (appMeta != null) {
                        appMeta.setContainerStart(containerId, date);
                    }
                } else {
                    throw new DataFieldNotFoundException(tail);
                }
                break;
            }
        case ", Resource:":
            {
                // Container spec line: capture the per-container memory/vcore
                // allocation for the owning job.
                final Matcher containerSpecMatcher = CONTAINER_SPEC_PATTERN.matcher(tail);
                if (containerSpecMatcher.find()) {
                    final String appId = "application_" + containerSpecMatcher.group(2);
                    final JobMetaData appMeta = jobMetas.get(appId);
                    if (appMeta != null) {
                        final long memAlloc = Long.parseLong(containerSpecMatcher.group(3));
                        final int cpuAlloc = Integer.parseInt(containerSpecMatcher.group(4));
                        final Resource containerAlloc = Resource.newInstance(memAlloc, cpuAlloc);
                        appMeta.getResourceSkyline().setContainerSpec(containerAlloc);
                    }
                } else {
                    throw new DataFieldNotFoundException(tail);
                }
                break;
            }
        case "AM Released Container":
            {
                // Container released; record its end time on the owning job.
                final Matcher containerEventMatcher = CONTAINER_EVENT_PATTERN.matcher(tail);
                if (containerEventMatcher.find()) {
                    final String appId = containerEventMatcher.group(1);
                    final JobMetaData appMeta = jobMetas.get(appId);
                    if (appMeta != null) {
                        final String containerId = containerEventMatcher.group(2);
                        appMeta.setContainerEnd(containerId, date);
                    }
                } else {
                    throw new DataFieldNotFoundException(tail);
                }
                break;
            }
        case "finalState=FAILED":
            {
                // Attempt failed: replace the tracked metadata with a fresh
                // record stamped at the failure time. NOTE(review): presumably
                // this resets tracking so a subsequent attempt is measured
                // cleanly — confirm against JobMetaData semantics.
                final Matcher failMatcher = FAIL_PATTERN.matcher(tail);
                if (failMatcher.find()) {
                    final String appId = "application_" + failMatcher.group(1);
                    if (jobMetas.containsKey(appId)) {
                        jobMetas.put(appId, new JobMetaData(date));
                    }
                } else {
                    throw new DataFieldNotFoundException(tail);
                }
                break;
            }
        case "ApplicationSummary":
            {
                // Job finished: finalize its skyline, remove it from the
                // in-progress map, and aggregate it under its recurrence id.
                final Matcher finishMatcher = FINISH_PATTERN.matcher(tail);
                if (finishMatcher.find()) {
                    final String appId = finishMatcher.group(1);
                    final String pipelineId = finishMatcher.group(2);
                    final String runId = finishMatcher.group(3);
                    final RecurrenceId recurrenceId = new RecurrenceId(pipelineId, runId);
                    final JobMetaData appMeta = jobMetas.remove(appId);
                    if (appMeta != null) {
                        appMeta.setRecurrenceId(recurrenceId).setJobFinishTime(date).getResourceSkyline().setJobInputDataSize(0);
                        appMeta.createSkyline();
                        final ResourceSkyline resourceSkyline = appMeta.getResourceSkyline();
                        resourceSkyline.setJobId(appId);
                        aggregateSkyline(resourceSkyline, recurrenceId, skylineRecords);
                    }
                } else {
                    throw new DataFieldNotFoundException(tail);
                }
                break;
            }
        default:
            break;
    }
}
301913.2117107hadoop
/**
 * Launches the unmanaged application master as a local child process and
 * blocks until it exits.
 *
 * <p>The AMRM token for the attempt is written to a mode-600 temp file whose
 * path is handed to the AM via the container-token environment variable. The
 * current process environment is forwarded (with {@code classpath} appended
 * to CLASSPATH when set), plus the container-id / NM host-port variables an
 * AM expects. stdout/stderr of the child are pumped to this process's
 * streams by two reader threads.
 *
 * @param attemptId the application attempt to launch the AM for
 * @throws IOException   on token-file or process I/O failure
 * @throws YarnException if fetching the AMRM token from the RM fails
 */
public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException {
    // Fetch and persist the AMRM token so the child AM process can read it.
    Credentials credentials = new Credentials();
    Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId());
    credentials.addToken(token.getService(), token);
    File tokenFile = File.createTempFile("unmanagedAMRMToken", "", new File(System.getProperty("user.dir")));
    try {
        // Restrict the token file to the owner.
        FileUtil.chmod(tokenFile.getAbsolutePath(), "600");
    } catch (InterruptedException ex) {
        // Restore the interrupt status before converting to unchecked so the
        // interruption is not silently lost.
        Thread.currentThread().interrupt();
        throw new RuntimeException(ex);
    }
    tokenFile.deleteOnExit();
    try (DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true))) {
        credentials.writeTokenStorageToStream(os);
    }
    // Forward the current environment; append the configured classpath to
    // CLASSPATH (or add one if the variable is absent).
    Map<String, String> env = System.getenv();
    ArrayList<String> envAMList = new ArrayList<String>();
    boolean setClasspath = false;
    for (Map.Entry<String, String> entry : env.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (key.equals("CLASSPATH")) {
            setClasspath = true;
            if (classpath != null) {
                value = value + File.pathSeparator + classpath;
            }
        }
        envAMList.add(key + "=" + value);
    }
    if (!setClasspath && classpath != null) {
        envAMList.add("CLASSPATH=" + classpath);
    }
    // Synthesize the container/NM environment an AM normally receives from a
    // NodeManager. Container 0 of the attempt stands in for the AM container.
    ContainerId containerId = ContainerId.newContainerId(attemptId, 0);
    String hostname = InetAddress.getLocalHost().getHostName();
    envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
    envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
    envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
    envAMList.add(Environment.NM_PORT.name() + "=0");
    // NOTE(review): the value below carries a leading space (" /tmp"), which
    // looks unintentional — confirm whether consumers of LOCAL_DIRS trim it.
    envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp");
    envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis());
    envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath());
    String[] envAM = new String[envAMList.size()];
    Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM));
    // Pump the child's stderr/stdout to our own streams so AM output is
    // visible; readers run until EOF or interruption.
    final BufferedReader errReader = new BufferedReader(new InputStreamReader(amProc.getErrorStream(), StandardCharsets.UTF_8));
    final BufferedReader inReader = new BufferedReader(new InputStreamReader(amProc.getInputStream(), StandardCharsets.UTF_8));
    Thread errThread = new Thread() {

        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.err.println(line);
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    Thread outThread = new Thread() {

        @Override
        public void run() {
            try {
                String line = inReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.out.println(line);
                    line = inReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the out stream", ioe);
            }
        }
    };
    try {
        errThread.start();
        outThread.start();
    } catch (IllegalStateException ignored) {
        // Thread.start throws this only if a thread was already started;
        // nothing further to do in that case.
    }
    try {
        int exitCode = amProc.waitFor();
        LOG.info("AM process exited with value: " + exitCode);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can observe it; previously
        // the interruption was only printed and then discarded.
        Thread.currentThread().interrupt();
        LOG.warn("Interrupted while waiting for the AM process to exit", e);
    } finally {
        amCompleted = true;
    }
    try {
        // Drain and close the pump threads/readers before destroying the
        // child process.
        errThread.join();
        outThread.join();
        errReader.close();
        inReader.close();
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie);
    } catch (IOException ioe) {
        LOG.warn("Error while closing the error/out stream", ioe);
    }
    amProc.destroy();
}
301984.3713110hadoop
/**
 * Validates ServiceApiUtil resource/name checks by mutating one Service
 * object step by step: each step fixes the previous validation failure and
 * expects the next one. The steps are order-dependent — each try/catch
 * relies on the state left behind by the steps before it.
 */
public void testResourceValidation() throws Exception {
    // Sanity: the constant used for name-length tests is exactly one char
    // over the DNS label limit.
    assertEquals(RegistryConstants.MAX_FQDN_LABEL_LENGTH + 1, LEN_64_STR.length());
    SliderFileSystem sfs = ServiceTestUtils.initMockFs();
    Service app = new Service();
    // No name set yet -> name validation must fail first.
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with no name");
    } catch (IllegalArgumentException e) {
        assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());
    }
    app.setName("test");
    // Name present but no version -> version validation fails next.
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + " service with no version");
    } catch (IllegalArgumentException e) {
        assertEquals(String.format(ERROR_APPLICATION_VERSION_INVALID, app.getName()), e.getMessage());
    }
    app.setVersion("v1");
    // With DNS enabled, names must be valid DNS labels: no leading digit, no
    // upper case, no '@', and within the length limit.
    String[] badNames = { "4finance", "Finance", "finance@home", LEN_64_STR };
    for (String badName : badNames) {
        app.setName(badName);
        try {
            ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
            Assert.fail(EXCEPTION_PREFIX + "service with bad name " + badName);
        } catch (IllegalArgumentException e) {
            // Only the failure matters here; messages differ per bad name.
        }
    }
    // With the default DNS config the over-long name is tolerated, so
    // validation proceeds to the missing launch command.
    app.setName(LEN_64_STR);
    Component comp = new Component().name("comp1");
    app.addComponent(comp);
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS);
        Assert.fail(EXCEPTION_PREFIX + "service with no launch command");
    } catch (IllegalArgumentException e) {
        assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, e.getMessage());
    }
    // Truncate the name to the DNS limit so the DNS-enabled config also
    // reaches the launch-command check.
    app.setName(LEN_64_STR.substring(0, RegistryConstants.MAX_FQDN_LABEL_LENGTH));
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with no launch command");
    } catch (IllegalArgumentException e) {
        assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, e.getMessage());
    }
    comp.setLaunchCommand("sleep 1");
    // Empty Resource -> memory must be flagged first.
    Resource res = new Resource();
    app.setResource(res);
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with no memory");
    } catch (IllegalArgumentException e) {
        assertEquals(String.format(RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, comp.getName()), e.getMessage());
    }
    // Memory set, but negative cpus -> cpu-range error.
    res.setMemory("100mb");
    res.setCpus(-2);
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with invalid no of cpus");
    } catch (IllegalArgumentException e) {
        assertEquals(String.format(RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, comp.getName()), e.getMessage());
    }
    // Valid cpus but no container count yet.
    res.setCpus(2);
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with no container count");
    } catch (IllegalArgumentException e) {
        Assert.assertTrue(e.getMessage().contains(ERROR_CONTAINERS_COUNT_INVALID));
    }
    // Profile together with explicit cpus/memory is rejected.
    res.setProfile("hbase_finance_large");
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with resource profile along with cpus/memory");
    } catch (IllegalArgumentException e) {
        assertEquals(String.format(RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, comp.getName()), e.getMessage());
    }
    // Profile alone is not supported yet either.
    res.setCpus(null);
    res.setMemory(null);
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with resource profile only");
    } catch (IllegalArgumentException e) {
        assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET, e.getMessage());
    }
    // Back to explicit resources; container count is still missing, so the
    // count error fires again as the final check.
    res.setProfile(null);
    res.setCpus(2);
    res.setMemory("2gb");
    try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "null number of containers");
    } catch (IllegalArgumentException e) {
        Assert.assertTrue(e.getMessage().startsWith(ERROR_CONTAINERS_COUNT_INVALID));
    }
}
301782.852692hadoop
/**
 * Handles the "-list" sub-command for applications, application attempts
 * and containers.
 *
 * @param cliParser parsed command line options
 * @param title     which entity kind to list (application / app /
 *                  applicationattempt / container)
 * @param opts      the full option set, used for usage output on bad input
 * @return 0 on success or unrecognized title, -1 on usage/validation errors
 * @throws Exception if the underlying listing call fails
 */
private int executeListCommand(CommandLine cliParser, String title, Options opts) throws Exception {
    final int usageError = -1;
    if (APPLICATION.equalsIgnoreCase(title) || APP.equalsIgnoreCase(title)) {
        allAppStates = false;
        // Requested application types, upper-cased and trimmed.
        Set<String> appTypes = new HashSet<>();
        if (cliParser.hasOption(APP_TYPE_CMD)) {
            String[] rawTypes = cliParser.getOptionValues(APP_TYPE_CMD);
            if (rawTypes != null) {
                for (String rawType : rawTypes) {
                    if (rawType.trim().isEmpty()) {
                        continue;
                    }
                    appTypes.add(StringUtils.toUpperCase(rawType).trim());
                }
            }
        }
        // Requested application states; the special "ALL" value selects
        // every state and stops further state parsing.
        EnumSet<YarnApplicationState> appStates = EnumSet.noneOf(YarnApplicationState.class);
        if (cliParser.hasOption(APP_STATE_CMD)) {
            String[] rawStates = cliParser.getOptionValues(APP_STATE_CMD);
            if (rawStates != null) {
                for (String rawState : rawStates) {
                    String trimmedState = rawState.trim();
                    if (trimmedState.isEmpty()) {
                        continue;
                    }
                    if (trimmedState.equalsIgnoreCase(ALLSTATES_OPTION)) {
                        allAppStates = true;
                        break;
                    }
                    try {
                        appStates.add(YarnApplicationState.valueOf(StringUtils.toUpperCase(rawState).trim()));
                    } catch (IllegalArgumentException ex) {
                        sysout.println("The application state " + rawState + " is invalid.");
                        sysout.println(getAllValidApplicationStates());
                        return usageError;
                    }
                }
            }
        }
        // Requested application tags (trimmed, non-empty).
        Set<String> appTags = new HashSet<>();
        if (cliParser.hasOption(APP_TAG_CMD)) {
            String[] rawTags = cliParser.getOptionValues(APP_TAG_CMD);
            if (rawTags != null) {
                for (String rawTag : rawTags) {
                    String trimmedTag = rawTag.trim();
                    if (!trimmedTag.isEmpty()) {
                        appTags.add(trimmedTag);
                    }
                }
            }
        }
        listApplications(appTypes, appStates, appTags);
        return 0;
    }
    if (APPLICATION_ATTEMPT.equalsIgnoreCase(title)) {
        if (hasAnyOtherCLIOptions(cliParser, opts, LIST_CMD)) {
            printUsage(title, opts);
            return usageError;
        }
        listApplicationAttempts(cliParser.getOptionValue(LIST_CMD));
        return 0;
    }
    if (CONTAINER.equalsIgnoreCase(title)) {
        if (hasAnyOtherCLIOptions(cliParser, opts, LIST_CMD, APP_TYPE_CMD, VERSION, COMPONENTS, STATES)) {
            printUsage(title, opts);
            return usageError;
        }
        String appAttemptIdOrName = cliParser.getOptionValue(LIST_CMD);
        try {
            // A parseable attempt id means containers are listed via the RM.
            ApplicationAttemptId.fromString(appAttemptIdOrName);
            listContainers(appAttemptIdOrName);
        } catch (IllegalArgumentException e) {
            // Not an attempt id: treat the value as a service name and query
            // the app-admin client for component instances instead.
            AppAdminClient client = AppAdminClient.createAppAdminClient(getSingleAppTypeFromCLI(cliParser), getConf());
            String version = cliParser.getOptionValue(VERSION);
            String[] components = cliParser.getOptionValues(COMPONENTS);
            String[] instanceStates = cliParser.getOptionValues(STATES);
            try {
                sysout.println(client.getInstances(appAttemptIdOrName, components == null ? null : Arrays.asList(components), version, instanceStates == null ? null : Arrays.asList(instanceStates)));
                return 0;
            } catch (ApplicationNotFoundException exception) {
                System.err.println("Application with name '" + appAttemptIdOrName + "' doesn't exist in RM or Timeline Server.");
                return -1;
            } catch (Exception ex) {
                System.err.println(ex.getMessage());
                return -1;
            }
        }
    }
    return 0;
}
302509.8314100hadoop
/**
 * Verifies that an acquired OPPORTUNISTIC container can be promoted to
 * GUARANTEED via requestContainerUpdate: first allocates one opportunistic
 * container (polling the AM-RM protocol), then asserts that a promotion
 * request targeting OPPORTUNISTIC is rejected, and finally promotes to
 * GUARANTEED and polls until the updated container arrives.
 */
public void testPromotionFromAcquired() throws YarnException, IOException {
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    // Ask for exactly one OPPORTUNISTIC container at priority2.
    amClient.addContainerRequest(new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, true, null, ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true)));
    int oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest.getNumContainers();
    assertEquals(1, oppContainersRequestedAny);
    assertEquals(1, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    int allocatedContainerCount = 0;
    Map<ContainerId, Container> allocatedOpportContainers = new HashMap<>();
    // Poll allocate() until the opportunistic container shows up (bounded
    // number of iterations with a short sleep between attempts).
    int iterationsLeft = 50;
    amClient.getNMTokenCache().clearCache();
    Assert.assertEquals(0, amClient.getNMTokenCache().numberOfTokensInCache());
    HashMap<String, Token> receivedNMTokens = new HashMap<>();
    updateMetrics("Before Opp Allocation");
    while (allocatedContainerCount < oppContainersRequestedAny && iterationsLeft-- > 0) {
        AllocateResponse allocResponse = amClient.allocate(0.1f);
        assertEquals(0, amClient.ask.size());
        assertEquals(0, amClient.release.size());
        allocatedContainerCount += allocResponse.getAllocatedContainers().size();
        for (Container container : allocResponse.getAllocatedContainers()) {
            if (container.getExecutionType() == ExecutionType.OPPORTUNISTIC) {
                allocatedOpportContainers.put(container.getId(), container);
                // Drop the matching container request so it is not re-asked.
                removeCR(container);
            }
        }
        for (NMToken token : allocResponse.getNMTokens()) {
            String nodeID = token.getNodeId().toString();
            receivedNMTokens.put(nodeID, token.getToken());
        }
        if (allocatedContainerCount < oppContainersRequestedAny) {
            sleep(100);
        }
    }
    assertEquals(oppContainersRequestedAny, allocatedContainerCount);
    assertEquals(oppContainersRequestedAny, allocatedOpportContainers.size());
    updateMetrics("After Opp Allocation / Before Promotion");
    // A promotion whose target execution type is OPPORTUNISTIC is invalid
    // and must be rejected client-side.
    try {
        Container c = allocatedOpportContainers.values().iterator().next();
        amClient.requestContainerUpdate(c, UpdateContainerRequest.newInstance(c.getVersion(), c.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.OPPORTUNISTIC));
        Assert.fail("Should throw Exception..");
    } catch (IllegalArgumentException e) {
        System.out.println("## " + e.getMessage());
        Assert.assertTrue(e.getMessage().contains("target should be GUARANTEED and original should be OPPORTUNISTIC"));
    }
    // Valid promotion: OPPORTUNISTIC -> GUARANTEED.
    Container c = allocatedOpportContainers.values().iterator().next();
    amClient.requestContainerUpdate(c, UpdateContainerRequest.newInstance(c.getVersion(), c.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE, null, ExecutionType.GUARANTEED));
    // Poll allocate() until the updated container is reported back.
    iterationsLeft = 120;
    Map<ContainerId, UpdatedContainer> updatedContainers = new HashMap<>();
    while (iterationsLeft-- > 0 && updatedContainers.isEmpty()) {
        AllocateResponse allocResponse = amClient.allocate(0.1f);
        if (allocResponse.getUpdatedContainers() != null) {
            for (UpdatedContainer updatedContainer : allocResponse.getUpdatedContainers()) {
                System.out.println("Got update..");
                updatedContainers.put(updatedContainer.getContainer().getId(), updatedContainer);
            }
        }
        if (iterationsLeft > 0) {
            sleep(100);
        }
    }
    updateMetrics("After Promotion");
    // The promoted container keeps its resource and node, switches to
    // GUARANTEED, and its version is bumped by one.
    assertEquals(1, updatedContainers.size());
    for (ContainerId cId : allocatedOpportContainers.keySet()) {
        Container orig = allocatedOpportContainers.get(cId);
        UpdatedContainer updatedContainer = updatedContainers.get(cId);
        assertNotNull(updatedContainer);
        assertEquals(ExecutionType.GUARANTEED, updatedContainer.getContainer().getExecutionType());
        assertEquals(orig.getResource(), updatedContainer.getContainer().getResource());
        assertEquals(orig.getNodeId(), updatedContainer.getContainer().getNodeId());
        assertEquals(orig.getVersion() + 1, updatedContainer.getContainer().getVersion());
    }
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    amClient.ask.clear();
}
302112.5718101hadoop
/**
 * Renders the aggregated logs for one container into the page.
 *
 * <p>Iterates the aggregated-log files for the application on the remote
 * file system, skipping files for other nodes, temporary upload files, and
 * files outside the requested time window. When a ".har" archive for the
 * application is found, iteration restarts inside the archive. For each
 * candidate file the owner/ACLs are checked against the remote user before
 * any log content is emitted.
 */
protected void render(Block html) {
    BlockParameters params = verifyAndParseParameters(html);
    if (params == null) {
        // Parameter validation already wrote an error to the page.
        return;
    }
    RemoteIterator<FileStatus> nodeFiles;
    try {
        nodeFiles = LogAggregationUtils.getRemoteNodeFileDir(conf, params.getAppId(), params.getAppOwner(), remoteRootLogDir, remoteRootLogDirSuffix);
    } catch (RuntimeException e) {
        // Let programming errors propagate; only checked failures below are
        // rendered as "no logs available".
        throw e;
    } catch (Exception ex) {
        html.h1("No logs available for container " + params.getContainerId().toString());
        return;
    }
    NodeId nodeId = params.getNodeId();
    String logEntity = params.getLogEntity();
    ApplicationId appId = params.getAppId();
    ContainerId containerId = params.getContainerId();
    long start = params.getStartIndex();
    long end = params.getEndIndex();
    long startTime = params.getStartTime();
    long endTime = params.getEndTime();
    boolean foundLog = false;
    String desiredLogType = $(CONTAINER_LOG_TYPE);
    try {
        while (nodeFiles.hasNext()) {
            AggregatedLogFormat.LogReader reader = null;
            try {
                FileStatus thisNodeFile = nodeFiles.next();
                if (thisNodeFile.getPath().getName().equals(params.getAppId() + ".har")) {
                    // Logs were archived: re-point the iterator inside the
                    // HAR archive and keep scanning.
                    Path p = new Path("har:///" + thisNodeFile.getPath().toUri().getRawPath());
                    nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
                    continue;
                }
                // Skip files for other nodes and in-progress uploads.
                if (!thisNodeFile.getPath().getName().contains(LogAggregationUtils.getNodeString(nodeId)) || thisNodeFile.getPath().getName().endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
                    continue;
                }
                // Honor the requested upload-time window.
                long logUploadedTime = thisNodeFile.getModificationTime();
                if (logUploadedTime < startTime || logUploadedTime > endTime) {
                    continue;
                }
                reader = new AggregatedLogFormat.LogReader(conf, thisNodeFile.getPath());
                String owner = null;
                Map<ApplicationAccessType, String> appAcls = null;
                try {
                    owner = reader.getApplicationOwner();
                    appAcls = reader.getApplicationAcls();
                } catch (IOException e) {
                    LOG.error("Error getting logs for " + logEntity, e);
                    continue;
                }
                // Enforce view ACLs per file before rendering any content.
                String remoteUser = request().getRemoteUser();
                if (!checkAcls(conf, appId, owner, appAcls, remoteUser)) {
                    html.h1().__("User [" + remoteUser + "] is not authorized to view the logs for " + logEntity + " in log file [" + thisNodeFile.getPath().getName() + "]").__();
                    LOG.error("User [" + remoteUser + "] is not authorized to view the logs for " + logEntity);
                    continue;
                }
                AggregatedLogFormat.ContainerLogsReader logReader = reader.getContainerLogsReader(containerId);
                if (logReader == null) {
                    // This file holds no logs for the requested container.
                    continue;
                }
                foundLog = readContainerLogs(html, logReader, start, end, desiredLogType, logUploadedTime, startTime, endTime);
            } catch (IOException ex) {
                LOG.error("Error getting logs for " + logEntity, ex);
                continue;
            } finally {
                // Close the per-file reader regardless of how this iteration
                // ended.
                if (reader != null) {
                    reader.close();
                }
            }
        }
        if (!foundLog) {
            if (desiredLogType.isEmpty()) {
                html.h1("No logs available for container " + containerId.toString());
            } else {
                html.h1("Unable to locate '" + desiredLogType + "' log for container " + containerId.toString());
            }
        }
    } catch (IOException e) {
        html.h1().__("Error getting logs for " + logEntity).__();
        LOG.error("Error getting logs for " + logEntity, e);
    }
}
301799.419102hadoop
/**
 * Builds a {@link ContainerReport} from a generic timeline entity.
 *
 * <p>Allocation details (memory, vcores, host/port, priority, HTTP address,
 * exposed ports, diagnostics, exit status, state) are read from the entity's
 * info map; created/finished timestamps come from its event stream. Missing
 * entries fall back to neutral defaults. The log URL is only assembled when
 * the allocated host, the server address, and the user are all known.
 *
 * @param entity        the timeline entity describing the container
 * @param serverAddress log-server address used to build the log URL, may be
 *                      null
 * @param user          user used in the log URL, may be null
 * @return a populated container report
 */
public static ContainerReport convertToContainerReport(TimelineEntity entity, String serverAddress, String user) {
    int allocatedMem = 0;
    int allocatedVcore = 0;
    String allocatedHost = null;
    int allocatedPort = -1;
    int allocatedPriority = 0;
    long createdTime = 0;
    long finishedTime = 0;
    String diagnosticsInfo = null;
    int exitStatus = ContainerExitStatus.INVALID;
    ContainerState state = null;
    String nodeHttpAddress = null;
    Map<String, List<Map<String, String>>> exposedPorts = null;
    Map<String, Object> entityInfo = entity.getInfo();
    if (entityInfo != null) {
        // Single lookup per key instead of containsKey + get; also skips
        // present-but-null values, which previously NPE'd on unboxing or
        // toString().
        Object mem = entityInfo.get(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO);
        if (mem != null) {
            allocatedMem = (Integer) mem;
        }
        Object vcore = entityInfo.get(ContainerMetricsConstants.ALLOCATED_VCORE_INFO);
        if (vcore != null) {
            allocatedVcore = (Integer) vcore;
        }
        Object host = entityInfo.get(ContainerMetricsConstants.ALLOCATED_HOST_INFO);
        if (host != null) {
            allocatedHost = host.toString();
        }
        Object port = entityInfo.get(ContainerMetricsConstants.ALLOCATED_PORT_INFO);
        if (port != null) {
            allocatedPort = (Integer) port;
        }
        Object priority = entityInfo.get(ContainerMetricsConstants.ALLOCATED_PRIORITY_INFO);
        if (priority != null) {
            allocatedPriority = Integer.parseInt(priority.toString());
        }
        Object httpAddress = entityInfo.get(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_INFO);
        if (httpAddress != null) {
            nodeHttpAddress = (String) httpAddress;
        }
        Object ports = entityInfo.get(ContainerMetricsConstants.ALLOCATED_EXPOSED_PORTS);
        if (ports != null) {
            // The info map is untyped; the cast is confined to this local.
            @SuppressWarnings("unchecked")
            Map<String, List<Map<String, String>>> castPorts = (Map<String, List<Map<String, String>>>) ports;
            exposedPorts = castPorts;
        }
        Object diagnostics = entityInfo.get(ContainerMetricsConstants.DIAGNOSTICS_INFO);
        if (diagnostics != null) {
            diagnosticsInfo = diagnostics.toString();
        }
        Object exit = entityInfo.get(ContainerMetricsConstants.EXIT_STATUS_INFO);
        if (exit != null) {
            exitStatus = (Integer) exit;
        }
        Object stateInfo = entityInfo.get(ContainerMetricsConstants.STATE_INFO);
        if (stateInfo != null) {
            state = ContainerState.valueOf(stateInfo.toString());
        }
    }
    // Created/finished times are carried as RM lifecycle events.
    NavigableSet<TimelineEvent> events = entity.getEvents();
    if (events != null) {
        for (TimelineEvent event : events) {
            if (event.getId().equals(ContainerMetricsConstants.CREATED_IN_RM_EVENT_TYPE)) {
                createdTime = event.getTimestamp();
            } else if (event.getId().equals(ContainerMetricsConstants.FINISHED_IN_RM_EVENT_TYPE)) {
                finishedTime = event.getTimestamp();
            }
        }
    }
    String logUrl = null;
    NodeId allocatedNode = null;
    String containerId = entity.getId();
    if (allocatedHost != null) {
        allocatedNode = NodeId.newInstance(allocatedHost, allocatedPort);
        if (serverAddress != null && user != null) {
            logUrl = PATH_JOINER.join(serverAddress, "logs", allocatedNode, containerId, containerId, user);
        }
    }
    ContainerReport container = ContainerReport.newInstance(ContainerId.fromString(entity.getId()), Resource.newInstance(allocatedMem, allocatedVcore), allocatedNode, Priority.newInstance(allocatedPriority), createdTime, finishedTime, diagnosticsInfo, logUrl, exitStatus, state, nodeHttpAddress);
    container.setExposedPorts(exposedPorts);
    return container;
}
301865.222297hadoop
/**
 * Central dispatcher for every HTTP request to this web app.
 *
 * Handling order (order is significant):
 * 1. dev-mode /__stop shutdown hook,
 * 2. root ("/") redirect to the app's configured redirect path,
 * 3. OPTIONS / TRACE / HEAD method short-circuits,
 * 4. robots.txt,
 * 5. pending error cookie (render the error page recorded by a prior redirect),
 * 6. normal controller routing via the injected Router.
 *
 * @param req the incoming request
 * @param res the response to populate
 * @throws ServletException on servlet-container failures
 * @throws IOException on I/O failures while writing the response
 */
public void service(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException {
    res.setCharacterEncoding("UTF-8");
    // HTML-quote the URI up front so it is safe to echo into logs and error pages.
    String uri = HtmlQuoting.quoteHtmlChars(req.getRequestURI());
    if (uri == null) {
        uri = "/";
    }
    // Dev-mode only: /__stop asks the app to shut down for a restart.
    if (devMode && uri.equals("/__stop")) {
        // Use the class constant instead of static-via-instance access.
        res.setStatus(HttpServletResponse.SC_NO_CONTENT);
        LOG.info("dev mode restart requested");
        prepareToExit();
        return;
    }
    if (uri.equals("/")) {
        String redirectPath = webApp.getRedirectPath();
        if (redirectPath != null && !redirectPath.isEmpty()) {
            if (req.getQueryString() != null) {
                // Strip CR/LF from the query string to prevent HTTP response
                // splitting via the Location header.
                StringBuilder query = new StringBuilder();
                query.append(redirectPath);
                query.append("?");
                query.append(req.getQueryString().replaceAll("\r", "").replaceAll("\n", ""));
                redirectPath = query.toString();
            }
            res.sendRedirect(redirectPath);
            return;
        }
    }
    String method = req.getMethod();
    if (method.equals("OPTIONS")) {
        doOptions(req, res);
        return;
    }
    if (method.equals("TRACE")) {
        doTrace(req, res);
        return;
    }
    if (method.equals("HEAD")) {
        // HEAD is served by the GET path; the container suppresses the body.
        doGet(req, res);
        return;
    }
    String pathInfo = req.getPathInfo();
    if (pathInfo == null) {
        pathInfo = "/";
    }
    Controller.RequestContext rc = injector.getInstance(Controller.RequestContext.class);
    if (uri.equals(RobotsTextPage.ROBOTS_TXT_PATH)) {
        rc.setStatus(HttpServletResponse.SC_FOUND);
        render(RobotsTextPage.class);
        return;
    }
    // An ERROR_COOKIE left by a previous redirectToErrorPage() means this
    // request should render the recorded error instead of routing normally.
    if (setCookieParams(rc, req) > 0) {
        Cookie ec = rc.cookies().get(ERROR_COOKIE);
        if (ec != null) {
            // Guard against a missing STATUS_COOKIE: previously this would NPE
            // (and surface as a 500) if only the error cookie survived.
            Cookie sc = rc.cookies().get(STATUS_COOKIE);
            if (sc != null) {
                rc.setStatus(Integer.parseInt(sc.getValue()));
            }
            removeErrorCookies(res, uri);
            rc.set(Params.ERROR_DETAILS, ec.getValue());
            render(ErrorPage.class);
            return;
        }
    }
    rc.prefix = webApp.name();
    Router.Dest dest = null;
    try {
        dest = router.resolve(method, pathInfo);
    } catch (WebAppException e) {
        rc.error = e;
        // "not found" resolution errors fall through to the 404 branch below;
        // anything else is an internal routing failure.
        if (!e.getMessage().contains("not found")) {
            rc.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
            render(ErrorPage.class);
            return;
        }
    }
    if (dest == null) {
        rc.setStatus(HttpServletResponse.SC_NOT_FOUND);
        render(ErrorPage.class);
        return;
    }
    rc.devMode = devMode;
    setMoreParams(rc, pathInfo, dest);
    Controller controller = injector.getInstance(dest.controllerClass);
    try {
        dest.action.invoke(controller, (Object[]) null);
        if (!rc.rendered) {
            if (dest.defaultViewClass != null) {
                render(dest.defaultViewClass);
            } else if (rc.status == 200) {
                // A 2xx response with nothing rendered is a controller bug.
                throw new IllegalStateException("No view rendered for 200");
            }
        }
    } catch (Exception e) {
        LOG.error("error handling URI: " + uri, e);
        redirectToErrorPage(res, e, uri, devMode);
    }
}
303005.133114hadoop
 /**
  * Exercises the ATSv2 TimelineEntity object model end to end: info/config
  * maps, TIME_SERIES vs SINGLE_VALUE metrics, events, relates-to links, and
  * the equals/hashCode/compareTo contracts of entities, metrics and events.
  *
  * NOTE(review): several assertions below depend on exact insertion order
  * (metric value iteration) and on a pinned hashCode value — do not reorder
  * statements when editing this test.
  */
 void testTimelineEntities() throws Exception {
    TimelineEntity entity = new TimelineEntity();
    entity.setType("test type 1");
    entity.setId("test id 1");
    // Info values keep their runtime types (String, List, Boolean).
    entity.addInfo("test info key 1", "test info value 1");
    entity.addInfo("test info key 2", Arrays.asList("test info value 2", "test info value 3"));
    entity.addInfo("test info key 3", true);
    assertTrue(entity.getInfo().get("test info key 3") instanceof Boolean);
    entity.addConfig("test config key 1", "test config value 1");
    entity.addConfig("test config key 2", "test config value 2");
    // TIME_SERIES metric: values added out of order; iteration below shows
    // the map is ordered by descending timestamp (3, 2, 1).
    TimelineMetric metric1 = new TimelineMetric(TimelineMetric.Type.TIME_SERIES);
    metric1.setId("test metric id 1");
    metric1.addValue(1L, 1.0F);
    metric1.addValue(3L, 3.0D);
    metric1.addValue(2L, 2);
    assertEquals(TimelineMetric.Type.TIME_SERIES, metric1.getType());
    Iterator<Map.Entry<Long, Number>> itr = metric1.getValues().entrySet().iterator();
    Map.Entry<Long, Number> entry = itr.next();
    assertEquals(Long.valueOf(3L), entry.getKey());
    assertEquals(3.0D, entry.getValue());
    entry = itr.next();
    assertEquals(Long.valueOf(2L), entry.getKey());
    assertEquals(2, entry.getValue());
    entry = itr.next();
    assertEquals(Long.valueOf(1L), entry.getKey());
    assertEquals(1.0F, entry.getValue());
    assertFalse(itr.hasNext());
    entity.addMetric(metric1);
    // SINGLE_VALUE metric: preserves the Number subtype (Short here) and
    // rejects multi-point setValues/addValues with IllegalArgumentException.
    TimelineMetric metric2 = new TimelineMetric(TimelineMetric.Type.SINGLE_VALUE);
    metric2.setId("test metric id 1");
    metric2.addValue(3L, (short) 3);
    assertEquals(TimelineMetric.Type.SINGLE_VALUE, metric2.getType());
    assertTrue(metric2.getValues().values().iterator().next() instanceof Short);
    Map<Long, Number> points = new HashMap<>();
    points.put(4L, 4.0D);
    points.put(5L, 5.0D);
    try {
        metric2.setValues(points);
        fail();
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("Values cannot contain more than one point in"));
    }
    try {
        metric2.addValues(points);
        fail();
    } catch (IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("Values cannot contain more than one point in"));
    }
    entity.addMetric(metric2);
    // Metric equality is by id + type, not by the stored values.
    TimelineMetric metric3 = new TimelineMetric(TimelineMetric.Type.SINGLE_VALUE);
    metric3.setId("test metric id 1");
    metric3.addValue(4L, (short) 4);
    assertEquals(metric3, metric2, "metric3 should equal to metric2! ");
    assertNotEquals(metric1, metric2, "metric1 should not equal to metric2! ");
    TimelineEvent event1 = new TimelineEvent();
    event1.setId("test event id 1");
    event1.addInfo("test info key 1", "test info value 1");
    event1.addInfo("test info key 2", Arrays.asList("test info value 2", "test info value 3"));
    event1.addInfo("test info key 3", true);
    assertTrue(event1.getInfo().get("test info key 3") instanceof Boolean);
    event1.setTimestamp(1L);
    entity.addEvent(event1);
    TimelineEvent event2 = new TimelineEvent();
    event2.setId("test event id 2");
    event2.addInfo("test info key 1", "test info value 1");
    event2.addInfo("test info key 2", Arrays.asList("test info value 2", "test info value 3"));
    event2.addInfo("test info key 3", true);
    assertTrue(event2.getInfo().get("test info key 3") instanceof Boolean);
    event2.setTimestamp(2L);
    entity.addEvent(event2);
    assertNotEquals(event1, event2);
    // Event equality is by id + timestamp; event3 has no info but still
    // equals event1.
    TimelineEvent event3 = new TimelineEvent();
    event3.setId("test event id 1");
    event3.setTimestamp(1L);
    assertEquals(event3, event1, "event1 should equal to event3! ");
    assertNotEquals(event1, event2, "event1 should not equal to event2! ");
    entity.setCreatedTime(0L);
    entity.addRelatesToEntity("test type 2", "test id 2");
    entity.addRelatesToEntity("test type 3", "test id 3");
    entity.addIsRelatedToEntity("test type 4", "test id 4");
    entity.addIsRelatedToEntity("test type 5", "test id 5");
    LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(entity, true));
    TimelineEntities entities = new TimelineEntities();
    TimelineEntity entity1 = new TimelineEntity();
    entities.addEntity(entity1);
    TimelineEntity entity2 = new TimelineEntity();
    entities.addEntity(entity2);
    LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(entities, true));
    // An entity without type and id must report itself invalid.
    assertFalse(entity1.isValid(), "entity 1 should not be valid without type and id");
    entity1.setId("test id 2");
    entity1.setType("test type 2");
    entity2.setId("test id 1");
    entity2.setType("test type 1");
    // Entity equality is by type + id only, so entity2 equals the fully
    // populated entity above.
    assertEquals(entity, entity2, "Timeline entity should equal to entity2! ");
    assertNotEquals(entity1, entity, "entity1 should not equal to entity! ");
    assertEquals(entity1.compareTo(entity), 1, "entity should be less than entity1! ");
    // NOTE(review): pins the hashCode implementation to a literal value;
    // any change to TimelineEntity.hashCode() will break this assertion.
    assertEquals(entity.hashCode(), -28727840, "entity's hash code should be -28727840 but not " + entity.hashCode());
}
301829.918112hadoop
/**
 * Converts an ATSv1 {@code TimelineEntity} published by the RM's container
 * metrics publisher into a {@link ContainerReport}.
 *
 * Static allocation facts (memory, vcores, host, port, priority, exposed
 * ports, node HTTP address) come from the entity's "other info" map; created
 * and finished times plus diagnostics/exit-status/state come from the
 * CREATED/FINISHED events. Missing fields keep their documented defaults
 * (0 / -1 / null / ContainerExitStatus.INVALID).
 *
 * @param entity the timeline entity to convert; its entity id must be a
 *               parseable ContainerId string
 * @param serverHttpAddress log-server address used to build the aggregated
 *                          log URL when an allocated host is known
 * @param user the user owning the container, used in the log URL
 * @return the populated container report
 */
@SuppressWarnings("unchecked")
private static ContainerReport convertToContainerReport(TimelineEntity entity, String serverHttpAddress, String user) {
    int allocatedMem = 0;
    int allocatedVcore = 0;
    String allocatedHost = null;
    int allocatedPort = -1;
    int allocatedPriority = 0;
    long createdTime = 0;
    long finishedTime = 0;
    String diagnosticsInfo = null;
    int exitStatus = ContainerExitStatus.INVALID;
    ContainerState state = null;
    String nodeHttpAddress = null;
    Map<String, List<Map<String, String>>> exposedPorts = null;
    Map<String, Object> entityInfo = entity.getOtherInfo();
    if (entityInfo != null) {
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO)) {
            allocatedMem = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_MEMORY_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_VCORE_INFO)) {
            allocatedVcore = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_VCORE_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_HOST_INFO)) {
            allocatedHost = entityInfo.get(ContainerMetricsConstants.ALLOCATED_HOST_INFO).toString();
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_PORT_INFO)) {
            allocatedPort = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_PORT_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_PRIORITY_INFO)) {
            allocatedPriority = (Integer) entityInfo.get(ContainerMetricsConstants.ALLOCATED_PRIORITY_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_INFO)) {
            nodeHttpAddress = (String) entityInfo.get(ContainerMetricsConstants.ALLOCATED_HOST_HTTP_ADDRESS_INFO);
        }
        if (entityInfo.containsKey(ContainerMetricsConstants.ALLOCATED_EXPOSED_PORTS)) {
            // Unchecked cast: the publisher stores the exposed-ports map with
            // this shape; suppressed at method level.
            exposedPorts = (Map<String, List<Map<String, String>>>) entityInfo.get(ContainerMetricsConstants.ALLOCATED_EXPOSED_PORTS);
        }
    }
    List<TimelineEvent> events = entity.getEvents();
    if (events != null) {
        for (TimelineEvent event : events) {
            if (event.getEventType().equals(ContainerMetricsConstants.CREATED_EVENT_TYPE)) {
                createdTime = event.getTimestamp();
            } else if (event.getEventType().equals(ContainerMetricsConstants.FINISHED_EVENT_TYPE)) {
                finishedTime = event.getTimestamp();
                // Diagnostics, exit status and final state ride along on the
                // FINISHED event's info map.
                Map<String, Object> eventInfo = event.getEventInfo();
                if (eventInfo == null) {
                    continue;
                }
                if (eventInfo.containsKey(ContainerMetricsConstants.DIAGNOSTICS_INFO)) {
                    diagnosticsInfo = eventInfo.get(ContainerMetricsConstants.DIAGNOSTICS_INFO).toString();
                }
                if (eventInfo.containsKey(ContainerMetricsConstants.EXIT_STATUS_INFO)) {
                    exitStatus = (Integer) eventInfo.get(ContainerMetricsConstants.EXIT_STATUS_INFO);
                }
                if (eventInfo.containsKey(ContainerMetricsConstants.STATE_INFO)) {
                    state = ContainerState.valueOf(eventInfo.get(ContainerMetricsConstants.STATE_INFO).toString());
                }
            }
        }
    }
    ContainerId containerId = ContainerId.fromString(entity.getEntityId());
    String logUrl = null;
    NodeId allocatedNode = null;
    if (allocatedHost != null) {
        allocatedNode = NodeId.newInstance(allocatedHost, allocatedPort);
        // NOTE(review): unlike the ATSv2 variant, no null check on
        // serverHttpAddress/user before building the URL — confirm callers
        // always supply both.
        logUrl = WebAppUtils.getAggregatedLogURL(serverHttpAddress, allocatedNode.toString(), containerId.toString(), containerId.toString(), user);
    }
    // Reuse the already-parsed containerId instead of re-parsing the entity id.
    ContainerReport container = ContainerReport.newInstance(containerId, Resource.newInstance(allocatedMem, allocatedVcore), allocatedNode, Priority.newInstance(allocatedPriority), createdTime, finishedTime, diagnosticsInfo, logUrl, exitStatus, state, nodeHttpAddress);
    container.setExposedPorts(exposedPorts);
    return container;
}
303040.699106hadoop
/**
 * Verifies that DefaultContainerExecutor.launchContainer copies the private
 * launch script, tokens file and (optionally) the keystore/truststore into
 * the container work directory, recreating the work dir if it was deleted.
 *
 * @param https when true, keystore/truststore files are provided and must be
 *              copied; when false, they must NOT appear in the work dir
 * @throws Exception on any test failure
 */
private void testLaunchContainerCopyFiles(boolean https) throws Exception {
    if (Shell.WINDOWS) {
        // On Windows, relocate the temp root under target/ to avoid path issues.
        BASE_TMP_PATH = new Path(new File("target").getAbsolutePath(), TestDefaultContainerExecutor.class.getSimpleName());
    }
    Path localDir = new Path(BASE_TMP_PATH, "localDir");
    List<String> localDirs = new ArrayList<String>();
    localDirs.add(localDir.toString());
    List<String> logDirs = new ArrayList<String>();
    Path logDir = new Path(BASE_TMP_PATH, "logDir");
    logDirs.add(logDir.toString());
    Configuration conf = new Configuration();
    // Restrictive umask so created files get owner-only permissions.
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.toString());
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir.toString());
    FileContext lfs = FileContext.getLocalFSFileContext(conf);
    deleteTmpFiles();
    lfs.mkdir(BASE_TMP_PATH, FsPermission.getDefault(), true);
    DefaultContainerExecutor dce = new DefaultContainerExecutor(lfs);
    dce.setConf(conf);
    // Mock out the container/launch context; only the fields the executor
    // reads are stubbed.
    Container container = mock(Container.class);
    ContainerId cId = mock(ContainerId.class);
    ContainerLaunchContext context = mock(ContainerLaunchContext.class);
    HashMap<String, String> env = new HashMap<String, String>();
    env.put("LANG", "C");
    String appSubmitter = "nobody";
    String appId = "APP_ID";
    String containerId = "CONTAINER_ID";
    when(container.getContainerId()).thenReturn(cId);
    when(container.getLaunchContext()).thenReturn(context);
    when(cId.toString()).thenReturn(containerId);
    when(cId.getApplicationAttemptId()).thenReturn(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 0));
    when(context.getEnvironment()).thenReturn(env);
    // Source files the executor is expected to copy into the work dir.
    Path scriptPath = new Path(BASE_TMP_PATH, "script");
    Path tokensPath = new Path(BASE_TMP_PATH, "tokens");
    Path keystorePath = new Path(BASE_TMP_PATH, "keystore");
    Path truststorePath = new Path(BASE_TMP_PATH, "truststore");
    writeStringToRelativePath(lfs, scriptPath, "script");
    writeStringToRelativePath(lfs, tokensPath, "tokens");
    if (https) {
        writeStringToRelativePath(lfs, keystorePath, "keystore");
        writeStringToRelativePath(lfs, truststorePath, "truststore");
    }
    Path workDir = localDir;
    Path pidFile = new Path(workDir, "pid.txt");
    dce.init(null);
    dce.activateContainer(cId, pidFile);
    ContainerStartContext.Builder ctxBuilder = new ContainerStartContext.Builder().setContainer(container).setNmPrivateContainerScriptPath(scriptPath).setNmPrivateTokensPath(tokensPath).setUser(appSubmitter).setAppId(appId).setContainerWorkDir(workDir).setLocalDirs(localDirs).setLogDirs(logDirs);
    if (https) {
        ctxBuilder.setNmPrivateTruststorePath(truststorePath).setNmPrivateKeystorePath(keystorePath);
    }
    ContainerStartContext ctx = ctxBuilder.build();
    // Delete the work dir to prove launchContainer recreates it.
    lfs.delete(workDir, true);
    try {
        lfs.getFileStatus(workDir);
        Assert.fail("Expected FileNotFoundException on " + workDir);
    } catch (FileNotFoundException e) {
        // expected: work dir was just deleted
    }
    dce.launchContainer(ctx);
    Path finalScriptPath = new Path(workDir, ContainerLaunch.CONTAINER_SCRIPT);
    Path finalTokensPath = new Path(workDir, ContainerLaunch.FINAL_CONTAINER_TOKENS_FILE);
    Path finalKeystorePath = new Path(workDir, ContainerLaunch.KEYSTORE_FILE);
    Path finalTrustorePath = new Path(workDir, ContainerLaunch.TRUSTSTORE_FILE);
    Assert.assertTrue(lfs.getFileStatus(workDir).isDirectory());
    Assert.assertTrue(lfs.getFileStatus(finalScriptPath).isFile());
    Assert.assertTrue(lfs.getFileStatus(finalTokensPath).isFile());
    if (https) {
        Assert.assertTrue(lfs.getFileStatus(finalKeystorePath).isFile());
        Assert.assertTrue(lfs.getFileStatus(finalTrustorePath).isFile());
    } else {
        try {
            lfs.getFileStatus(finalKeystorePath);
            Assert.fail("Expected FileNotFoundException on " + finalKeystorePath);
        } catch (FileNotFoundException e) {
            // expected: no keystore in non-https mode
        }
        try {
            lfs.getFileStatus(finalTrustorePath);
            // Fixed copy-paste bug: message previously named finalKeystorePath.
            Assert.fail("Expected FileNotFoundException on " + finalTrustorePath);
        } catch (FileNotFoundException e) {
            // expected: no truststore in non-https mode
        }
    }
    Assert.assertEquals("script", readStringFromPath(lfs, finalScriptPath));
    Assert.assertEquals("tokens", readStringFromPath(lfs, finalTokensPath));
    if (https) {
        Assert.assertEquals("keystore", readStringFromPath(lfs, finalKeystorePath));
        Assert.assertEquals("truststore", readStringFromPath(lfs, finalTrustorePath));
    }
}
302154.2716108hadoop
/**
 * Handles a client application-submission RPC.
 *
 * Validates the submission context (tags, flow-run id when timeline service
 * v2 is enabled, token-conf size, reservation ACLs), fills in default queue /
 * name / type, then hands the context to the RMAppManager. Success and every
 * failure path are recorded via RMAuditLogger.
 *
 * @param request the submission request carrying the application context
 * @return an (empty) SubmitApplicationResponse; resubmission of a known
 *         application id also returns success (idempotent)
 * @throws YarnException on validation or submission failure
 * @throws IOException wrapped into the remote exception path
 */
public SubmitApplicationResponse submitApplication(SubmitApplicationRequest request) throws YarnException, IOException {
    ApplicationSubmissionContext submissionContext = request.getApplicationSubmissionContext();
    ApplicationId applicationId = submissionContext.getApplicationId();
    CallerContext callerContext = CallerContext.getCurrent();
    UserGroupInformation userUgi = null;
    String user = null;
    try {
        userUgi = UserGroupInformation.getCurrentUser();
        user = userUgi.getShortUserName();
    } catch (IOException ie) {
        // Cannot identify the caller: audit the failure (user is still null
        // here) and reject the submission.
        LOG.warn("Unable to get the current user.", ie);
        RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST, ie.getMessage(), "ClientRMService", "Exception in submitting application", applicationId, callerContext, submissionContext.getQueue());
        throw RPCUtil.getRemoteException(ie);
    }
    checkTags(submissionContext.getApplicationTags());
    if (timelineServiceV2Enabled) {
        // A TIMELINE_FLOW_RUN_ID tag (either case) must carry a parseable
        // long; Long.valueOf is called purely for validation.
        String value = null;
        try {
            for (String tag : submissionContext.getApplicationTags()) {
                if (tag.startsWith(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX + ":") || tag.startsWith(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX.toLowerCase() + ":")) {
                    value = tag.substring(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX.length() + 1);
                    Long.valueOf(value);
                }
            }
        } catch (NumberFormatException e) {
            LOG.warn("Invalid to flow run: " + value + ". Flow run should be a long integer", e);
            RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST, e.getMessage(), "ClientRMService", "Exception in submitting application", applicationId, submissionContext.getQueue());
            throw RPCUtil.getRemoteException(e);
        }
    }
    // Duplicate submission (e.g. client retry) is treated as success.
    if (rmContext.getRMApps().get(applicationId) != null) {
        LOG.info("This is an earlier submitted application: " + applicationId);
        return SubmitApplicationResponse.newInstance();
    }
    // Cap the size of app-provided configuration used for token renewal.
    ByteBuffer tokenConf = submissionContext.getAMContainerSpec().getTokensConf();
    if (tokenConf != null) {
        int maxSize = getConfig().getInt(YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE, YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_MAX_CONF_SIZE_BYTES);
        LOG.info("Using app provided configurations for delegation token renewal," + " total size = " + tokenConf.capacity());
        if (tokenConf.capacity() > maxSize) {
            throw new YarnException("Exceed " + YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE + " = " + maxSize + " bytes, current conf size = " + tokenConf.capacity() + " bytes.");
        }
    }
    // Fill in defaults for queue, name and type; type is truncated to the
    // configured maximum length.
    if (submissionContext.getQueue() == null) {
        submissionContext.setQueue(YarnConfiguration.DEFAULT_QUEUE_NAME);
    }
    if (submissionContext.getApplicationName() == null) {
        submissionContext.setApplicationName(YarnConfiguration.DEFAULT_APPLICATION_NAME);
    }
    if (submissionContext.getApplicationType() == null) {
        submissionContext.setApplicationType(YarnConfiguration.DEFAULT_APPLICATION_TYPE);
    } else {
        if (submissionContext.getApplicationType().length() > YarnConfiguration.APPLICATION_TYPE_LENGTH) {
            submissionContext.setApplicationType(submissionContext.getApplicationType().substring(0, YarnConfiguration.APPLICATION_TYPE_LENGTH));
        }
    }
    ReservationId reservationId = request.getApplicationSubmissionContext().getReservationID();
    checkReservationACLs(submissionContext.getQueue(), AuditConstants.SUBMIT_RESERVATION_REQUEST, reservationId);
    // Optional pluggable pre-processor may rewrite the context per host.
    if (this.contextPreProcessor != null) {
        this.contextPreProcessor.preProcess(Server.getRemoteIp().getHostName(), applicationId, submissionContext);
    }
    try {
        rmAppManager.submitApplication(submissionContext, System.currentTimeMillis(), userUgi);
        LOG.info("Application with id " + applicationId.getId() + " submitted by user " + user);
        RMAuditLogger.logSuccess(user, AuditConstants.SUBMIT_APP_REQUEST, "ClientRMService", applicationId, callerContext, submissionContext.getQueue(), submissionContext.getNodeLabelExpression());
    } catch (YarnException e) {
        LOG.info("Exception in submitting " + applicationId, e);
        RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST, e.getMessage(), "ClientRMService", "Exception in submitting application", applicationId, callerContext, submissionContext.getQueue(), submissionContext.getNodeLabelExpression());
        throw e;
    }
    return recordFactory.newRecordInstance(SubmitApplicationResponse.class);
}
302295.311997hadoop
/**
 * Validates a client-supplied reservation definition against the plan.
 *
 * Checks, in order: definition present, deadline in the future, resource
 * requests present and non-empty, window long enough for the minimum
 * duration, largest gang fits the plan capacity, and recurrence expression
 * well-formed and compatible with the plan's maximum periodicity. Every
 * failing check (except the recurrence ones, preserved as-is) records an
 * audit failure before throwing.
 *
 * @param reservationId the reservation being validated (currently unused
 *                      here, kept for interface stability)
 * @param contract the reservation definition from the client; may be null
 * @param plan the plan the reservation targets
 * @param auditConstant audit-log operation tag for failure entries
 * @throws YarnException if any validation check fails
 */
private void validateReservationDefinition(ReservationId reservationId, ReservationDefinition contract, Plan plan, String auditConstant) throws YarnException {
    String message = "";
    if (contract == null) {
        message = "Missing reservation definition." + " Please try again by specifying a reservation definition.";
        logFailureAndThrow(auditConstant, message);
    }
    if (contract.getDeadline() <= clock.getTime()) {
        message = "The specified deadline: " + contract.getDeadline() + " is the past. Please try again with deadline in the future.";
        logFailureAndThrow(auditConstant, message);
    }
    ReservationRequests resReqs = contract.getReservationRequests();
    if (resReqs == null) {
        message = "No resources have been specified to reserve." + "Please try again by specifying the resources to reserve.";
        logFailureAndThrow(auditConstant, message);
    }
    List<ReservationRequest> resReq = resReqs.getReservationResources();
    if (resReq == null || resReq.isEmpty()) {
        message = "No resources have been specified to reserve." + " Please try again by specifying the resources to reserve.";
        logFailureAndThrow(auditConstant, message);
    }
    long minDuration = 0;
    Resource maxGangSize = Resource.newInstance(0, 0);
    ReservationRequestInterpreter type = contract.getReservationRequests().getInterpreter();
    for (ReservationRequest rr : resReq) {
        // R_ALL / R_ANY stages can run concurrently, so the minimum duration
        // is the longest stage; ordered interpreters run stages sequentially,
        // so durations add up.
        if (type == ReservationRequestInterpreter.R_ALL || type == ReservationRequestInterpreter.R_ANY) {
            minDuration = Math.max(minDuration, rr.getDuration());
        } else {
            minDuration += rr.getDuration();
        }
        maxGangSize = Resources.max(plan.getResourceCalculator(), plan.getTotalCapacity(), maxGangSize, Resources.multiply(rr.getCapability(), rr.getConcurrency()));
    }
    long duration = contract.getDeadline() - contract.getArrival();
    if (duration < minDuration && type != ReservationRequestInterpreter.R_ANY) {
        message = "The time difference (" + (duration) + ") between arrival (" + contract.getArrival() + ") " + "and deadline (" + contract.getDeadline() + ") must " + " be greater or equal to the minimum resource duration (" + minDuration + ")";
        logFailureAndThrow(auditConstant, message);
    }
    if (Resources.greaterThan(plan.getResourceCalculator(), plan.getTotalCapacity(), maxGangSize, plan.getTotalCapacity()) && type != ReservationRequestInterpreter.R_ANY) {
        message = "The size of the largest gang in the reservation definition (" + maxGangSize + ") exceed the capacity available (" + plan.getTotalCapacity() + " )";
        logFailureAndThrow(auditConstant, message);
    }
    String recurrenceExpression = contract.getRecurrenceExpression();
    try {
        long recurrence = Long.parseLong(recurrenceExpression);
        // NOTE(review): the recurrence failures below do not record audit
        // entries, unlike the checks above; preserved as-is — confirm whether
        // that asymmetry is intentional.
        if (recurrence < 0) {
            message = "Negative Period : " + recurrenceExpression + ". Please try" + " again with a non-negative long value as period.";
            throw RPCUtil.getRemoteException(message);
        }
        if (recurrence > 0 && duration > recurrence) {
            message = "Duration of the requested reservation: " + duration + " is greater than the recurrence: " + recurrence + ". Please try again with a smaller duration.";
            throw RPCUtil.getRemoteException(message);
        }
        if (recurrence > 0 && plan.getMaximumPeriodicity() % recurrence != 0) {
            message = "The maximum periodicity: " + plan.getMaximumPeriodicity() + " must be divisible by the recurrence expression provided: " + recurrence + ". Please try again with a recurrence expression" + " that satisfies this requirement.";
            throw RPCUtil.getRemoteException(message);
        }
    } catch (NumberFormatException e) {
        message = "Invalid period " + recurrenceExpression + ". Please try" + " again with a non-negative long value as period.";
        throw RPCUtil.getRemoteException(message);
    }
}

/**
 * Records a reservation-input validation failure in the audit log and throws
 * it back to the client. Extracted from six verbatim repetitions above.
 *
 * @param auditConstant audit-log operation tag
 * @param message the failure message to log and throw
 * @throws YarnException always
 */
private static void logFailureAndThrow(String auditConstant, String message) throws YarnException {
    RMAuditLogger.logFailure("UNKNOWN", auditConstant, "validate reservation input definition", "ClientRMService", message);
    throw RPCUtil.getRemoteException(message);
}
301886.716106hadoop
/**
 * Fast pre-checks run before attempting an allocation for this scheduler key
 * on the given node. Returns a terminal {@code ContainerAllocation} verdict
 * (PRIORITY_SKIPPED / APP_SKIPPED / QUEUE_SKIPPED) when any check fails, or
 * {@code null} when allocation may proceed.
 *
 * The ordering of checks is deliberate: cheap pending-ask checks first, then
 * AM/exclusivity rules, placement constraints, reservation heuristics,
 * headroom, and finally the non-partitioned-request delay policy. Each
 * rejection records an activity-diagnostics entry for the scheduler UI.
 *
 * @param node the candidate node
 * @param schedulingMode exclusive vs IGNORE_PARTITION_EXCLUSIVITY
 * @param resourceLimits queue headroom limits for this attempt
 * @param schedulerKey the request priority/allocation key being scheduled
 * @return a skip verdict, or null to continue with allocation
 */
private ContainerAllocation preCheckForNodeCandidateSet(FiCaSchedulerNode node, SchedulingMode schedulingMode, ResourceLimits resourceLimits, SchedulerRequestKey schedulerKey) {
    // No pending ANY request means nothing to allocate at this priority.
    PendingAsk offswitchPendingAsk = application.getPendingAsk(schedulerKey, ResourceRequest.ANY);
    if (offswitchPendingAsk.getCount() <= 0) {
        ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.REQUEST_DO_NOT_NEED_RESOURCE, ActivityLevel.REQUEST);
        return ContainerAllocation.PRIORITY_SKIPPED;
    }
    Resource required = offswitchPendingAsk.getPerAllocationResource();
    // Outstanding-ask count can differ from the pending ask above; re-check.
    if (application.getOutstandingAsksCount(schedulerKey) <= 0) {
        ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.REQUEST_DO_NOT_NEED_RESOURCE, ActivityLevel.REQUEST);
        return ContainerAllocation.PRIORITY_SKIPPED;
    }
    // AM containers are not placed on borrowed (non-exclusive) partitions
    // unless explicitly allowed by configuration.
    if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
        if (application.isWaitingForAMContainer() && !rmContext.getYarnConfiguration().getBoolean(AM_ALLOW_NON_EXCLUSIVE_ALLOCATION, false)) {
            LOG.debug("Skip allocating AM container to app_attempt={}," + " don't allow to allocate AM container in non-exclusive mode", application.getApplicationAttemptId());
            application.updateAppSkipNodeDiagnostics("Skipping assigning to Node in Ignore Exclusivity mode. ");
            ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.REQUEST_SKIPPED_IN_IGNORE_EXCLUSIVITY_MODE, ActivityLevel.REQUEST);
            return ContainerAllocation.APP_SKIPPED;
        }
    }
    // Partition / placement-constraint check; optional diagnostics collector
    // captures the reason for the activities UI.
    Optional<DiagnosticsCollector> dcOpt = activitiesManager == null ? Optional.empty() : activitiesManager.getOptionalDiagnosticsCollector();
    if (!appInfo.precheckNode(schedulerKey, node, schedulingMode, dcOpt)) {
        ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.NODE_DO_NOT_MATCH_PARTITION_OR_PLACEMENT_CONSTRAINTS + ActivitiesManager.getDiagnostics(dcOpt), ActivityLevel.NODE);
        return ContainerAllocation.PRIORITY_SKIPPED;
    }
    // With continue-looking disabled, consult the reservation heuristic.
    if (!application.getCSLeafQueue().isReservationsContinueLooking()) {
        if (!shouldAllocOrReserveNewContainer(schedulerKey, required)) {
            LOG.debug("doesn't need containers based on reservation algo!");
            ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.REQUEST_SKIPPED_BECAUSE_OF_RESERVATION, ActivityLevel.REQUEST);
            return ContainerAllocation.PRIORITY_SKIPPED;
        }
    }
    if (!checkHeadroom(resourceLimits, required, node.getPartition())) {
        LOG.debug("cannot allocate required resource={} because of headroom", required);
        ActivitiesLogger.APP.recordAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.QUEUE_DO_NOT_HAVE_ENOUGH_HEADROOM, ActivityState.REJECTED, ActivityLevel.REQUEST);
        return ContainerAllocation.QUEUE_SKIPPED;
    }
    int missedNonPartitionedRequestSchedulingOpportunity = 0;
    AppPlacementAllocator appPlacementAllocator = appInfo.getAppPlacementAllocator(schedulerKey);
    if (null == appPlacementAllocator) {
        ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.REQUEST_SKIPPED_BECAUSE_NULL_ANY_REQUEST, ActivityLevel.REQUEST);
        return ContainerAllocation.PRIORITY_SKIPPED;
    }
    // Requests targeting the default (no-label) partition accumulate "missed
    // opportunities" before being allowed onto a shared partition.
    String requestPartition = appPlacementAllocator.getPrimaryRequestedNodePartition();
    if (StringUtils.equals(RMNodeLabelsManager.NO_LABEL, requestPartition)) {
        missedNonPartitionedRequestSchedulingOpportunity = application.addMissedNonPartitionedRequestSchedulingOpportunity(schedulerKey);
    }
    if (schedulingMode == SchedulingMode.IGNORE_PARTITION_EXCLUSIVITY) {
        // Delay non-exclusive placement until the request has been skipped on
        // at least one full pass over the cluster's nodes.
        if (missedNonPartitionedRequestSchedulingOpportunity < rmContext.getScheduler().getNumClusterNodes()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Skip app_attempt=" + application.getApplicationAttemptId() + " priority=" + schedulerKey.getPriority() + " because missed-non-partitioned-resource-request" + " opportunity under required:" + " Now=" + missedNonPartitionedRequestSchedulingOpportunity + " required=" + rmContext.getScheduler().getNumClusterNodes());
            }
            ActivitiesLogger.APP.recordSkippedAppActivityWithoutAllocation(activitiesManager, node, application, schedulerKey, ActivityDiagnosticConstant.REQUEST_SKIPPED_BECAUSE_NON_PARTITIONED_PARTITION_FIRST, ActivityLevel.REQUEST);
            return ContainerAllocation.APP_SKIPPED;
        }
    }
    // All pre-checks passed: caller may proceed with the allocation attempt.
    return null;
}
302299.4217101hadoop
/**
 * Cluster Applications REST endpoint: returns every application matching the
 * supplied query parameters.
 *
 * Results are optionally served from (and written back to) an LRU cache keyed
 * by the complete set of query parameters. Applications are listed via
 * ClientRMService, then post-filtered by final status and — when
 * {@code filterAppsByUser} is enabled — by the caller's access rights. If the
 * caller accepts XML and {@code filterInvalidXMLChars} is set, invalid XML
 * characters are escaped out of each application's diagnostics note.
 *
 * @return an {@link AppsInfo} holding one {@link AppInfo} per matching app
 * @throws YarnRuntimeException if ClientRMService fails to list applications
 * @throws IllegalArgumentException if {@code finalStatusQuery} is not a valid
 *         {@link FinalApplicationStatus} name
 */
public AppsInfo getApps(@Context HttpServletRequest hsr, @QueryParam(RMWSConsts.STATE) String stateQuery, @QueryParam(RMWSConsts.STATES) Set<String> statesQuery, @QueryParam(RMWSConsts.FINAL_STATUS) String finalStatusQuery, @QueryParam(RMWSConsts.USER) String userQuery, @QueryParam(RMWSConsts.QUEUE) String queueQuery, @QueryParam(RMWSConsts.LIMIT) String limit, @QueryParam(RMWSConsts.STARTED_TIME_BEGIN) String startedBegin, @QueryParam(RMWSConsts.STARTED_TIME_END) String startedEnd, @QueryParam(RMWSConsts.FINISHED_TIME_BEGIN) String finishBegin, @QueryParam(RMWSConsts.FINISHED_TIME_END) String finishEnd, @QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> applicationTypes, @QueryParam(RMWSConsts.APPLICATION_TAGS) Set<String> applicationTags, @QueryParam(RMWSConsts.NAME) String name, @QueryParam(RMWSConsts.DESELECTS) Set<String> unselectedFields) {
    AppsCacheKey cacheKey = AppsCacheKey.newInstance(stateQuery, new HashSet<>(statesQuery), finalStatusQuery, userQuery, queueQuery, limit, startedBegin, startedEnd, finishBegin, finishEnd, new HashSet<>(applicationTypes), new HashSet<>(applicationTags), name, unselectedFields);
    if (this.enableAppsCache) {
        long successTimes = getAppsSuccessTimes.incrementAndGet();
        // Periodically surface cache effectiveness (every 1000th lookup).
        if (successTimes % 1000 == 0) {
            LOG.debug("hit cache info: getAppsSuccessTimes={}, hitAppsCacheTimes={}", successTimes, hitAppsCacheTimes.get());
        }
        AppsInfo appsInfo = appsLRUCache.get(cacheKey);
        if (appsInfo != null) {
            hitAppsCacheTimes.getAndIncrement();
            return appsInfo;
        }
    }
    initForReadableEndpoints();
    GetApplicationsRequest request = ApplicationsRequestBuilder.create().withStateQuery(stateQuery).withStatesQuery(statesQuery).withUserQuery(userQuery).withQueueQuery(rm, queueQuery).withLimit(limit).withStartedTimeBegin(startedBegin).withStartedTimeEnd(startedEnd).withFinishTimeBegin(finishBegin).withFinishTimeEnd(finishEnd).withApplicationTypes(applicationTypes).withApplicationTags(applicationTags).withName(name).build();
    List<ApplicationReport> appReports;
    try {
        appReports = rm.getClientRMService().getApplications(request).getApplicationList();
    } catch (YarnException e) {
        LOG.error("Unable to retrieve apps from ClientRMService", e);
        throw new YarnRuntimeException("Unable to retrieve apps from ClientRMService", e);
    }
    final ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext().getRMApps();
    AppsInfo allApps = new AppsInfo();
    // Hoisted loop-invariant work out of the per-application loop:
    // the final-status filter is validated once (valueOf throws
    // IllegalArgumentException for an unknown name — now consistently, even
    // when no application would have matched) and the deselect fields are
    // parsed once instead of per application. NOTE(review): the single
    // DeSelectFields instance is shared across AppInfo constructions on the
    // assumption that AppInfo only reads it — confirm against AppInfo.
    final boolean filterByFinalStatus = finalStatusQuery != null && !finalStatusQuery.isEmpty();
    if (filterByFinalStatus) {
        FinalApplicationStatus.valueOf(finalStatusQuery);
    }
    final DeSelectFields deSelectFields = new DeSelectFields();
    deSelectFields.initFields(unselectedFields);
    for (ApplicationReport report : appReports) {
        RMApp rmapp = apps.get(report.getApplicationId());
        // The app may have been removed between the RPC and this lookup.
        if (rmapp == null) {
            continue;
        }
        if (filterByFinalStatus && !rmapp.getFinalApplicationStatus().toString().equalsIgnoreCase(finalStatusQuery)) {
            continue;
        }
        boolean allowAccess = hasAccess(rmapp, hsr);
        if (filterAppsByUser && !allowAccess) {
            continue;
        }
        AppInfo app = new AppInfo(rm, rmapp, allowAccess, WebAppUtils.getHttpSchemePrefix(conf), deSelectFields);
        allApps.add(app);
    }
    if (filterInvalidXMLChars) {
        final String format = hsr.getHeader(HttpHeaders.ACCEPT);
        // Only sanitize when the client actually asked for XML output.
        if (format != null && format.toLowerCase().contains(MediaType.APPLICATION_XML)) {
            for (AppInfo appInfo : allApps.getApps()) {
                appInfo.setNote(escapeInvalidXMLCharacters(appInfo.getNote()));
            }
        }
    }
    if (enableAppsCache) {
        appsLRUCache.put(cacheKey, allApps);
        getAppsSuccessTimes.getAndIncrement();
    }
    return allApps;
}
303769.061104hadoop
/**
 * Verifies that a CapacityScheduler LeafQueue recomputes per-application
 * headroom as additional users submit applications and as the cluster
 * shrinks: apps of the same user share one headroom value, a second user
 * halves it, and a cluster resize triggers a user-limit recompute.
 */
public void testHeadroom() throws Exception {
    // Queue "a" configured with a 25% per-user limit.
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setUserLimit(A_QUEUE_PATH, 25);
    setupQueueConfiguration(csConf);
    // 100 nodes x 16GB cluster; 1GB min / 16GB max allocation.
    Resource clusterResource = Resources.createResource(100 * 16 * GB);
    CapacitySchedulerContext context = createCSContext(csConf, resourceCalculator, Resources.createResource(GB), Resources.createResource(16 * GB), clusterResource);
    CapacitySchedulerQueueManager queueManager = context.getCapacitySchedulerQueueManager();
    CapacitySchedulerQueueContext queueContext = new CapacitySchedulerQueueContext(context);
    CSQueueStore queues = new CSQueueStore();
    CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(queueContext, csConf, null, "root", queues, queues, TestUtils.spyHook);
    queueManager.setRootQueue(rootQueue);
    rootQueue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    ResourceUsage queueCapacities = rootQueue.getQueueResourceUsage();
    when(context.getClusterResourceUsage()).thenReturn(queueCapacities);
    // Stubbed leaf queue "a" under test.
    LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get(A));
    queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    String host_0 = "host_0";
    String rack_0 = "rack_0";
    FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 16 * GB);
    final String user_0 = "user_0";
    final String user_1 = "user_1";
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    // Spy the RMContext so the app map can be stubbed to always resolve apps.
    RMContext rmContext = TestUtils.getMockRMContext();
    RMContext spyRMContext = spy(rmContext);
    ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
    RMApp rmApp = mock(RMApp.class);
    ResourceRequest amResourceRequest = mock(ResourceRequest.class);
    Resource amResource = Resources.createResource(0, 0);
    when(amResourceRequest.getCapability()).thenReturn(amResource);
    when(rmApp.getAMResourceRequests()).thenReturn(Collections.singletonList(amResourceRequest));
    Mockito.doReturn(rmApp).when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
    when(spyRMContext.getRMApps()).thenReturn(spyApps);
    RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
    when(rmApp.getRMAppAttempt(any())).thenReturn(rmAppAttempt);
    when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
    Mockito.doReturn(rmApp).when(spyApps).get(ArgumentMatchers.<ApplicationId>any());
    Mockito.doReturn(true).when(spyApps).containsKey(ArgumentMatchers.<ApplicationId>any());
    Priority priority_1 = TestUtils.createMockPriority(1);
    // First app of user_0: request 2x1GB, schedule once, check headroom.
    final ApplicationAttemptId appAttemptId_0_0 = TestUtils.getMockApplicationAttemptId(0, 0);
    FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_0, user_0);
    List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
    app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_0.updateResourceRequests(app_0_0_requests);
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    // NOTE(review): 5*16GB presumably follows from the 25%-user-limit math on
    // queue "a"'s capacity — confirm derivation against setupQueueConfiguration.
    Resource expectedHeadroom = Resources.createResource(5 * 16 * GB, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    // Second app of the SAME user: headroom of both apps must stay unchanged.
    final ApplicationAttemptId appAttemptId_0_1 = TestUtils.getMockApplicationAttemptId(1, 0);
    FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_0_1, user_0);
    List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
    app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_0_1.updateResourceRequests(app_0_1_requests);
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroom, app_0_1.getHeadroom());
    // App of a SECOND user: headroom is split between the two active users.
    final ApplicationAttemptId appAttemptId_1_0 = TestUtils.getMockApplicationAttemptId(2, 0);
    FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.getAbstractUsersManager(), spyRMContext);
    queue.submitApplicationAttempt(app_1_0, user_1);
    List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
    app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
    app_1_0.updateResourceRequests(app_1_0_requests);
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    expectedHeadroom = Resources.createResource(10 * 16 * GB / 2, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroom, app_0_1.getHeadroom());
    assertEquals(expectedHeadroom, app_1_0.getHeadroom());
    // Shrink the cluster to 90 nodes; a user-limit recompute must lower the
    // shared headroom proportionally for all three apps.
    clusterResource = Resources.createResource(90 * 16 * GB);
    rootQueue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
    queue.getUsersManager().userLimitNeedsRecompute();
    queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    expectedHeadroom = Resources.createResource(9 * 16 * GB / 2, 1);
    assertEquals(expectedHeadroom, app_0_0.getHeadroom());
    assertEquals(expectedHeadroom, app_0_1.getHeadroom());
    assertEquals(expectedHeadroom, app_1_0.getHeadroom());
}
303069.082117hadoop
/**
 * Verifies that CapacityScheduler.moveAllApps moves every application
 * (running and pending) from queue a1 to b1 and that the UsersManager
 * accounting — total active users and users with only pending apps —
 * is carried over correctly to the destination queue.
 */
public void testMoveAppWithActiveUsersWithOnlyPendingApps() throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    // Queue layout: root -> {a -> a1, b -> b1}, 50/50 split.
    CapacitySchedulerConfiguration newConf = new CapacitySchedulerConfiguration(conf);
    newConf.setQueues(ROOT, new String[] { "a", "b" });
    newConf.setCapacity(A, 50);
    newConf.setCapacity(B, 50);
    newConf.setQueues(A, new String[] { "a1" });
    newConf.setCapacity(A1, 100);
    newConf.setUserLimitFactor(A1, 2.0f);
    // Small AM share on a1 so later apps stay pending (AM cannot launch).
    newConf.setMaximumAMResourcePercentPerPartition(A1, "", 0.1f);
    newConf.setQueues(B, new String[] { "b1" });
    newConf.setCapacity(B1, 100);
    newConf.setUserLimitFactor(B1, 2.0f);
    MockRM rm = new MockRM(newConf);
    rm.start();
    CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler();
    MockNM nm1 = rm.registerNode("h1:1234", 16 * GB);
    // u1 and u2 get running AMs; u3 and u4 submit but never launch an AM,
    // so they are users with only pending apps.
    MockRMAppSubmissionData data3 = MockRMAppSubmissionData.Builder.createWithMemory(GB, rm).withAppName("test-move-1").withUser("u1").withAcls(null).withQueue("a1").withUnmanagedAM(false).build();
    RMApp app = MockRMAppSubmitter.submit(rm, data3);
    MockAM am1 = MockRM.launchAndRegisterAM(app, rm, nm1);
    ApplicationAttemptId appAttemptId = rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
    MockRMAppSubmissionData data2 = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm).withAppName("app").withUser("u2").withAcls(null).withQueue("a1").withUnmanagedAM(false).build();
    RMApp app2 = MockRMAppSubmitter.submit(rm, data2);
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
    MockRMAppSubmissionData data1 = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm).withAppName("app").withUser("u3").withAcls(null).withQueue("a1").withUnmanagedAM(false).build();
    RMApp app3 = MockRMAppSubmitter.submit(rm, data1);
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm).withAppName("app").withUser("u4").withAcls(null).withQueue("a1").withUnmanagedAM(false).build();
    RMApp app4 = MockRMAppSubmitter.submit(rm, data);
    // Keep u1/u2 active by requesting more containers than the node can hold.
    am1.allocate("*", 1 * GB, 50, null);
    am2.allocate("*", 1 * GB, 50, null);
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    // Before the move: all four attempts live under root/a/a1, none under b/b1.
    assertApps(scheduler, "root", app3.getCurrentAppAttempt().getAppAttemptId(), app4.getCurrentAppAttempt().getAppAttemptId(), appAttemptId, app2.getCurrentAppAttempt().getAppAttemptId());
    assertApps(scheduler, "a", app3.getCurrentAppAttempt().getAppAttemptId(), app4.getCurrentAppAttempt().getAppAttemptId(), appAttemptId, app2.getCurrentAppAttempt().getAppAttemptId());
    assertApps(scheduler, "a1", app3.getCurrentAppAttempt().getAppAttemptId(), app4.getCurrentAppAttempt().getAppAttemptId(), appAttemptId, app2.getCurrentAppAttempt().getAppAttemptId());
    assertApps(scheduler, "b");
    assertApps(scheduler, "b1");
    UsersManager um = (UsersManager) scheduler.getQueue("a1").getAbstractUsersManager();
    assertEquals(4, um.getNumActiveUsers());
    assertEquals(2, um.getNumActiveUsersWithOnlyPendingApps());
    scheduler.moveAllApps("a1", "b1");
    // Drive scheduling after the move so queue/user state settles.
    for (int i = 0; i < 10; i++) {
        cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
        Thread.sleep(500);
    }
    // After the move: all four attempts live under root/b/b1, none under a/a1.
    assertApps(scheduler, "root", appAttemptId, app2.getCurrentAppAttempt().getAppAttemptId(), app3.getCurrentAppAttempt().getAppAttemptId(), app4.getCurrentAppAttempt().getAppAttemptId());
    assertApps(scheduler, "a");
    assertApps(scheduler, "a1");
    assertApps(scheduler, "b", appAttemptId, app2.getCurrentAppAttempt().getAppAttemptId(), app3.getCurrentAppAttempt().getAppAttemptId(), app4.getCurrentAppAttempt().getAppAttemptId());
    assertApps(scheduler, "b1", appAttemptId, app2.getCurrentAppAttempt().getAppAttemptId(), app3.getCurrentAppAttempt().getAppAttemptId(), app4.getCurrentAppAttempt().getAppAttemptId());
    // b1 should now account for the pending-only users (u3, u4).
    UsersManager umB1 = (UsersManager) scheduler.getQueue("b1").getAbstractUsersManager();
    assertEquals(2, umB1.getNumActiveUsers());
    assertEquals(2, umB1.getNumActiveUsersWithOnlyPendingApps());
    rm.close();
}
303141.521113hadoop
/**
 * Verifies that scheduler request keys are garbage-collected from an
 * application's AppSchedulingInfo as their resource requests are either
 * satisfied by allocations or cancelled (numContainers set to 0).
 */
public void testSchedulerKeyGarbageCollection() throws Exception {
    YarnConfiguration conf = new YarnConfiguration(new CapacitySchedulerConfiguration());
    conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
    MockRM rm = new MockRM(conf);
    rm.start();
    // Four 4GB node managers.
    HashMap<NodeId, MockNM> nodes = new HashMap<>();
    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm1.getNodeId(), nm1);
    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm2.getNodeId(), nm2);
    MockNM nm3 = new MockNM("h3:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm3.getNodeId(), nm3);
    MockNM nm4 = new MockNM("h4:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm4.getNodeId(), nm4);
    nm1.registerNode();
    nm2.registerNode();
    nm3.registerNode();
    nm4.registerNode();
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm).withAppName("app").withUser("user").withAcls(null).withQueue("default").withUnmanagedAM(false).build();
    RMApp app1 = MockRMAppSubmitter.submit(rm, data);
    ApplicationAttemptId attemptId = app1.getCurrentAppAttempt().getAppAttemptId();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    ResourceScheduler scheduler = rm.getResourceScheduler();
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    nm3.nodeHeartbeat(true);
    nm4.nodeHeartbeat(true);
    Thread.sleep(1000);
    // Four distinct (priority, allocationRequestId) requests => 4 scheduler keys.
    AllocateResponse allocateResponse = am1.allocate(Arrays.asList(newResourceRequest(1, 1, ResourceRequest.ANY, Resources.createResource(3 * GB), 1, true, ExecutionType.GUARANTEED), newResourceRequest(2, 2, ResourceRequest.ANY, Resources.createResource(3 * GB), 1, true, ExecutionType.GUARANTEED), newResourceRequest(3, 3, ResourceRequest.ANY, Resources.createResource(3 * GB), 1, true, ExecutionType.GUARANTEED), newResourceRequest(4, 4, ResourceRequest.ANY, Resources.createResource(3 * GB), 1, true, ExecutionType.GUARANTEED)), null);
    List<Container> allocatedContainers = allocateResponse.getAllocatedContainers();
    Assert.assertEquals(0, allocatedContainers.size());
    Collection<SchedulerRequestKey> schedulerKeys = ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId).getAppSchedulingInfo().getSchedulerKeys();
    Assert.assertEquals(4, schedulerKeys.size());
    // One heartbeat satisfies one request; its key must be collected (4 -> 3).
    nm1.nodeHeartbeat(true);
    Thread.sleep(200);
    allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    allocatedContainers = allocateResponse.getAllocatedContainers();
    Assert.assertEquals(1, allocatedContainers.size());
    Assert.assertEquals(3, schedulerKeys.size());
    List<ResourceRequest> resReqs = ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId).getAppSchedulingInfo().getAllResourceRequests();
    Assert.assertEquals(3, resReqs.size());
    nm2.nodeHeartbeat(true);
    Thread.sleep(200);
    // Allocation plus an explicit cancel (numContainers=0) of the satisfied
    // request's key: 3 -> 2 keys.
    allocateResponse = am1.allocate(Arrays.asList(newResourceRequest(1, allocatedContainers.get(0).getAllocationRequestId(), ResourceRequest.ANY, Resources.createResource(3 * GB), 0, true, ExecutionType.GUARANTEED)), new ArrayList<>());
    allocatedContainers = allocateResponse.getAllocatedContainers();
    Assert.assertEquals(1, allocatedContainers.size());
    Assert.assertEquals(2, schedulerKeys.size());
    resReqs = ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId).getAppSchedulingInfo().getAllResourceRequests();
    Assert.assertEquals(2, resReqs.size());
    // Cancel one of the two remaining keys directly: 2 -> 1.
    SchedulerRequestKey sk = schedulerKeys.iterator().next();
    am1.allocate(Arrays.asList(newResourceRequest(sk.getPriority().getPriority(), sk.getAllocationRequestId(), ResourceRequest.ANY, Resources.createResource(3 * GB), 0, true, ExecutionType.GUARANTEED)), null);
    schedulerKeys = ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId).getAppSchedulingInfo().getSchedulerKeys();
    Thread.sleep(200);
    Assert.assertEquals(1, schedulerKeys.size());
    // Satisfy the last outstanding request: no keys or requests remain.
    nm3.nodeHeartbeat(true);
    Thread.sleep(200);
    allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    allocatedContainers = allocateResponse.getAllocatedContainers();
    Assert.assertEquals(1, allocatedContainers.size());
    Assert.assertEquals(0, schedulerKeys.size());
    resReqs = ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId).getAppSchedulingInfo().getAllResourceRequests();
    Assert.assertEquals(0, resReqs.size());
    rm.stop();
}
303139.022115hadoop
/**
 * Verifies container resize behavior when an increase is granted but the AM
 * never acts on it before the allocation expires: a subsequent decrease is
 * still honored, expired increases roll back, and the NM is told the final
 * (decreased) sizes on heartbeat.
 */
public void testDecreaseAfterIncreaseWithAllocationExpiration() throws Exception {
    // Short allocation-expiry interval so granted-but-unused increases lapse
    // within the test's sleep window.
    conf.setLong(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, 5000);
    MockRM rm1 = new MockRM(conf);
    rm1.start();
    MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 20 * GB);
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm1).withAppName("app").withUser("user").withAcls(null).withQueue("default").withUnmanagedAM(false).build();
    RMApp app1 = MockRMAppSubmitter.submit(rm1, data);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    nm1.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1, ContainerState.RUNNING);
    // Allocate three 3GB task containers (ids 2..4) and run them.
    am1.allocate("127.0.0.1", 3 * GB, 3, new ArrayList<ContainerId>());
    ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
    rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
    ContainerId containerId3 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
    rm1.waitForState(nm1, containerId3, RMContainerState.ALLOCATED);
    ContainerId containerId4 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 4);
    rm1.waitForState(nm1, containerId4, RMContainerState.ALLOCATED);
    List<Container> containers = am1.allocate(null, null).getAllocatedContainers();
    Assert.assertEquals(3, containers.size());
    Assert.assertNotNull(containers.get(0).getContainerToken());
    Assert.assertNotNull(containers.get(1).getContainerToken());
    Assert.assertNotNull(containers.get(2).getContainerToken());
    nm1.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 2, ContainerState.RUNNING);
    nm1.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 3, ContainerState.RUNNING);
    nm1.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 4, ContainerState.RUNNING);
    rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
    rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING);
    rm1.waitForState(nm1, containerId4, RMContainerState.RUNNING);
    // Ask to increase all three containers to 6GB.
    List<UpdateContainerRequest> increaseRequests = new ArrayList<>();
    increaseRequests.add(UpdateContainerRequest.newInstance(0, containerId2, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(6 * GB), null));
    increaseRequests.add(UpdateContainerRequest.newInstance(0, containerId3, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(6 * GB), null));
    increaseRequests.add(UpdateContainerRequest.newInstance(0, containerId4, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(6 * GB), null));
    am1.sendContainerResizingRequest(increaseRequests);
    nm1.nodeHeartbeat(true);
    Thread.sleep(1000);
    am1.allocate(null, null);
    // Before the increases are acted upon, request decreases (to 2/4/4 GB).
    List<UpdateContainerRequest> decreaseRequests = new ArrayList<>();
    decreaseRequests.add(UpdateContainerRequest.newInstance(1, containerId2, ContainerUpdateType.DECREASE_RESOURCE, Resources.createResource(2 * GB), null));
    decreaseRequests.add(UpdateContainerRequest.newInstance(1, containerId3, ContainerUpdateType.DECREASE_RESOURCE, Resources.createResource(4 * GB), null));
    decreaseRequests.add(UpdateContainerRequest.newInstance(1, containerId4, ContainerUpdateType.DECREASE_RESOURCE, Resources.createResource(4 * GB), null));
    AllocateResponse response = am1.sendContainerResizingRequest(decreaseRequests);
    Assert.assertEquals(3, response.getUpdatedContainers().size());
    // Only container 4's increase is confirmed by the NM; the others expire.
    nm1.containerIncreaseStatus(getContainer(rm1, containerId4, Resources.createResource(6 * GB)));
    // Sleep past the 5s expiry interval so unconfirmed increases lapse.
    Thread.sleep(12000);
    am1.allocate(null, null);
    rm1.drainEvents();
    // Final sizes: 2GB (decreased), 3GB (rolled back), 4GB (decreased after
    // the confirmed increase).
    Assert.assertEquals(2 * GB, rm1.getResourceScheduler().getRMContainer(containerId2).getAllocatedResource().getMemorySize());
    Assert.assertEquals(3 * GB, rm1.getResourceScheduler().getRMContainer(containerId3).getAllocatedResource().getMemorySize());
    Assert.assertEquals(4 * GB, rm1.getResourceScheduler().getRMContainer(containerId4).getAllocatedResource().getMemorySize());
    // The NM heartbeat must carry the resulting decrease commands; at least
    // the 3GB and 4GB targets are always present.
    List<Container> containersToDecrease = nm1.nodeHeartbeat(true).getContainersToUpdate();
    Assert.assertTrue(containersToDecrease.size() >= 2);
    Collections.sort(containersToDecrease);
    int i = 0;
    if (containersToDecrease.size() > 2) {
        Assert.assertEquals(2 * GB, containersToDecrease.get(i++).getResource().getMemorySize());
    }
    Assert.assertEquals(3 * GB, containersToDecrease.get(i++).getResource().getMemorySize());
    Assert.assertEquals(4 * GB, containersToDecrease.get(i++).getResource().getMemorySize());
    rm1.stop();
}
303060.371116hadoop
/**
 * Verifies per-node allocation-tag cardinality aggregation in
 * AllocationTagsManager across three scopes: a single application, all
 * applications except one ("other apps"), and globally — each combined with
 * max/min/sum aggregation operators.
 */
public void testNodeAllocationTagsAggregation() throws InvalidAllocationTagsQueryException {
    RMContext mockContext = Mockito.spy(rmContext);
    ApplicationId app1 = TestUtils.getMockApplicationId(1);
    ApplicationId app2 = TestUtils.getMockApplicationId(2);
    ApplicationId app3 = TestUtils.getMockApplicationId(3);
    NodeId host1 = NodeId.fromString("host1:123");
    NodeId host2 = NodeId.fromString("host2:123");
    NodeId host3 = NodeId.fromString("host3:123");
    // Register three mock apps so the manager can resolve app ids.
    ConcurrentMap<ApplicationId, RMApp> allApps = new ConcurrentHashMap<>();
    allApps.put(app1, new MockRMApp(123, 1000, RMAppState.NEW, "userA", ImmutableSet.of("")));
    allApps.put(app2, new MockRMApp(124, 1001, RMAppState.NEW, "userA", ImmutableSet.of("")));
    allApps.put(app3, new MockRMApp(125, 1002, RMAppState.NEW, "userA", ImmutableSet.of("")));
    Mockito.when(mockContext.getRMApps()).thenReturn(allApps);
    AllocationTagsManager atm = new AllocationTagsManager(mockContext);
    // Tag layout: host1 gets app1:{A,B},{A}; app2:{A}x3; app3:{A}.
    // host2 gets app1:{C}; app2:{A},{B},{B}; app3:{B}. host3 gets {D}x2.
    atm.addContainer(host1, TestUtils.getMockContainerId(1, 1), ImmutableSet.of("A", "B"));
    atm.addContainer(host1, TestUtils.getMockContainerId(1, 2), ImmutableSet.of("A"));
    atm.addContainer(host1, TestUtils.getMockContainerId(2, 1), ImmutableSet.of("A"));
    atm.addContainer(host1, TestUtils.getMockContainerId(2, 2), ImmutableSet.of("A"));
    atm.addContainer(host1, TestUtils.getMockContainerId(2, 3), ImmutableSet.of("A"));
    atm.addContainer(host1, TestUtils.getMockContainerId(3, 1), ImmutableSet.of("A"));
    atm.addContainer(host2, TestUtils.getMockContainerId(1, 3), ImmutableSet.of("C"));
    atm.addContainer(host2, TestUtils.getMockContainerId(2, 4), ImmutableSet.of("A"));
    atm.addContainer(host2, TestUtils.getMockContainerId(2, 5), ImmutableSet.of("B"));
    atm.addContainer(host2, TestUtils.getMockContainerId(2, 6), ImmutableSet.of("B"));
    atm.addContainer(host2, TestUtils.getMockContainerId(3, 2), ImmutableSet.of("B"));
    atm.addContainer(host3, TestUtils.getMockContainerId(2, 7), ImmutableSet.of("D"));
    atm.addContainer(host3, TestUtils.getMockContainerId(3, 3), ImmutableSet.of("D"));
    // NOTE(review): removed an unused local here — a TargetApplications
    // instance was constructed and never referenced afterwards.
    // Single-app scope (app1) over tags {A, C}.
    AllocationTags tags = AllocationTags.createSingleAppAllocationTags(app1, ImmutableSet.of("A", "C"));
    Assert.assertEquals(2, atm.getNodeCardinalityByOp(host1, tags, Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host1, tags, Long::min));
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host2, tags, Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host2, tags, Long::min));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host3, tags, Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host3, tags, Long::min));
    // "Other apps" scope: everything except app1, over tags {A, B}.
    tags = AllocationTags.createOtherAppAllocationTags(app1, ImmutableSet.of("A", "B"));
    Assert.assertEquals(4, atm.getNodeCardinalityByOp(host1, tags, Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host1, tags, Long::min));
    Assert.assertEquals(4, atm.getNodeCardinalityByOp(host1, tags, Long::sum));
    // Single-app scope (app2) over tags {A, B}.
    tags = AllocationTags.createSingleAppAllocationTags(app2, ImmutableSet.of("A", "B"));
    Assert.assertEquals(3, atm.getNodeCardinalityByOp(host1, tags, Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host1, tags, Long::min));
    Assert.assertEquals(2, atm.getNodeCardinalityByOp(host2, tags, Long::max));
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host2, tags, Long::min));
    Assert.assertEquals(3, atm.getNodeCardinalityByOp(host2, tags, Long::sum));
    // Global scope over tag {A}: sums across all apps per node.
    tags = AllocationTags.createGlobalAllocationTags(ImmutableSet.of("A"));
    Assert.assertEquals(6, atm.getNodeCardinalityByOp(host1, tags, Long::sum));
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host2, tags, Long::sum));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host3, tags, Long::sum));
    // Global scope over tags {A, B} with sum, max, and min operators.
    tags = AllocationTags.createGlobalAllocationTags(ImmutableSet.of("A", "B"));
    Assert.assertEquals(7, atm.getNodeCardinalityByOp(host1, tags, Long::sum));
    Assert.assertEquals(4, atm.getNodeCardinalityByOp(host2, tags, Long::sum));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host3, tags, Long::sum));
    Assert.assertEquals(6, atm.getNodeCardinalityByOp(host1, tags, Long::max));
    Assert.assertEquals(3, atm.getNodeCardinalityByOp(host2, tags, Long::max));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host3, tags, Long::max));
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host1, tags, Long::min));
    Assert.assertEquals(1, atm.getNodeCardinalityByOp(host2, tags, Long::min));
    Assert.assertEquals(0, atm.getNodeCardinalityByOp(host3, tags, Long::min));
}
303068.032109hadoop
/**
 * Verifies FairScheduler AM-share enforcement with the default maxAMShare:
 * an AM launches when within the share, and AMs are withheld when they would
 * exceed the queue's AM share, its maxResources, or the scheduler's maximum
 * allocation (vcores).
 */
public void testQueueMaxAMShareDefault() throws Exception {
    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
    // Cap a single allocation at 6 vcores so an AM asking for more is denied.
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 6);
    // queue1: defaults; queue2: maxAMShare=0.4; queue3: capped maxResources;
    // queue4/queue5: defaults (used to dilute fair shares).
    AllocationFileWriter.create().fairDefaultQueueSchedulingPolicy().addQueue(new AllocationFileQueue.Builder("queue1").build()).addQueue(new AllocationFileQueue.Builder("queue2").maxAMShare(0.4f).build()).addQueue(new AllocationFileQueue.Builder("queue3").maxResources("10240 mb 4 vcores").build()).addQueue(new AllocationFileQueue.Builder("queue4").build()).addQueue(new AllocationFileQueue.Builder("queue5").build()).writeToFile(ALLOC_FILE);
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    // Single 8GB/10-vcore node.
    RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(8192, 10), 0, "127.0.0.1");
    NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
    NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
    scheduler.handle(nodeEvent);
    scheduler.update();
    // With no demand yet, every queue's fair share is zero.
    FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
    assertEquals("Queue queue1's fair share should be 0", 0, queue1.getFairShare().getMemorySize());
    FSLeafQueue queue2 = scheduler.getQueueManager().getLeafQueue("queue2", true);
    assertEquals("Queue queue2's fair share should be 0", 0, queue2.getFairShare().getMemorySize());
    FSLeafQueue queue3 = scheduler.getQueueManager().getLeafQueue("queue3", true);
    assertEquals("Queue queue3's fair share should be 0", 0, queue3.getFairShare().getMemorySize());
    FSLeafQueue queue4 = scheduler.getQueueManager().getLeafQueue("queue4", true);
    assertEquals("Queue queue4's fair share should be 0", 0, queue4.getFairShare().getMemorySize());
    FSLeafQueue queue5 = scheduler.getQueueManager().getLeafQueue("queue5", true);
    assertEquals("Queue queue5's fair share should be 0", 0, queue5.getFairShare().getMemorySize());
    // Create demand on queue3-5 so fair shares are distributed.
    List<String> queues = Arrays.asList("root.queue3", "root.queue4", "root.queue5");
    for (String queue : queues) {
        createSchedulingRequest(1 * 1024, queue, "user1");
        scheduler.update();
        scheduler.handle(updateEvent);
    }
    Resource amResource1 = Resource.newInstance(1024, 1);
    int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
    // App 1 on queue1: AM fits within the default AM share and must launch.
    ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
    createApplicationWithAMResource(attId1, "queue1", "test1", amResource1);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId1);
    FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application1's AM requests 1024 MB memory", 1024, app1.getAMResource().getMemorySize());
    assertEquals("Application1's AM should be running", 1, app1.getLiveContainers().size());
    assertEquals("Queue1's AM resource usage should be 1024 MB memory", 1024, queue1.getAmResourceUsage().getMemorySize());
    // App 2 on queue2 (maxAMShare=0.4): AM must NOT launch.
    ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
    createApplicationWithAMResource(attId2, "queue2", "test1", amResource1);
    createSchedulingRequestExistingApplication(1024, 1, amPriority, attId2);
    FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application2's AM resource shouldn't be updated", 0, app2.getAMResource().getMemorySize());
    assertEquals("Application2's AM should not be running", 0, app2.getLiveContainers().size());
    assertEquals("Queue2's AM resource usage should be 0 MB memory", 0, queue2.getAmResourceUsage().getMemorySize());
    AppAttemptRemovedSchedulerEvent appRemovedEvent2 = new AppAttemptRemovedSchedulerEvent(attId2, RMAppAttemptState.FINISHED, false);
    scheduler.handle(appRemovedEvent2);
    scheduler.update();
    // App 3 on queue3 asks for 6 vcores — at the scheduler max but above the
    // queue's 4-vcore maxResources, so the AM must NOT launch.
    ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
    createApplicationWithAMResource(attId3, "queue3", "test1", amResource1);
    createSchedulingRequestExistingApplication(1024, 6, amPriority, attId3);
    FSAppAttempt app3 = scheduler.getSchedulerApp(attId3);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application3's AM resource shouldn't be updated", 0, app3.getAMResource().getMemorySize());
    assertEquals("Application3's AM should not be running", 0, app3.getLiveContainers().size());
    assertEquals("Queue3's AM resource usage should be 0 MB memory", 0, queue3.getAmResourceUsage().getMemorySize());
    // App 4 on queue3 asks for 5 vcores — still above the queue's 4-vcore
    // cap, so the AM must NOT launch either.
    ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
    createApplicationWithAMResource(attId4, "queue3", "test1", amResource1);
    createSchedulingRequestExistingApplication(1024, 5, amPriority, attId4);
    FSAppAttempt app4 = scheduler.getSchedulerApp(attId4);
    scheduler.update();
    scheduler.handle(updateEvent);
    assertEquals("Application4's AM resource shouldn't be updated", 0, app4.getAMResource().getMemorySize());
    assertEquals("Application4's AM should not be running", 0, app4.getLiveContainers().size());
    assertEquals("Queue3's AM resource usage should be 0 MB memory", 0, queue3.getAmResourceUsage().getMemorySize());
}
303202.22114hadoop
public void testCapacitySchedulerRecovery() throws Exception {
    // Verifies that after an RM restart the CapacityScheduler rebuilds its
    // per-queue resource accounting and QueueMetrics from the containers the
    // NMs report back. Only applicable when running with the CapacityScheduler.
    if (getSchedulerType() != SchedulerType.CAPACITY) {
        return;
    }
    conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
    conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, DominantResourceCalculator.class.getName());
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(conf);
    setupQueueConfiguration(csConf);
    // First RM instance with two 8GB NMs registered against it.
    rm1 = new MockRM(csConf);
    rm1.start();
    MockNM nm1 = new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
    MockNM nm2 = new MockNM("127.1.1.1:4321", 8192, rm1.getResourceTrackerService());
    nm1.registerNode();
    nm2.registerNode();
    // Two apps for USER_1 in queue A and one app for USER_2 in queue B,
    // each with a launched and registered AM.
    MockRMAppSubmissionData data2 = MockRMAppSubmissionData.Builder.createWithMemory(1024, rm1).withAppName("app1_1").withUser(USER_1).withAcls(null).withQueue(A).withUnmanagedAM(false).build();
    RMApp app1_1 = MockRMAppSubmitter.submit(rm1, data2);
    MockAM am1_1 = MockRM.launchAndRegisterAM(app1_1, rm1, nm1);
    MockRMAppSubmissionData data1 = MockRMAppSubmissionData.Builder.createWithMemory(1024, rm1).withAppName("app1_2").withUser(USER_1).withAcls(null).withQueue(A).withUnmanagedAM(false).build();
    RMApp app1_2 = MockRMAppSubmitter.submit(rm1, data1);
    MockAM am1_2 = MockRM.launchAndRegisterAM(app1_2, rm1, nm2);
    MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(1024, rm1).withAppName("app2").withUser(USER_2).withAcls(null).withQueue(B).withUnmanagedAM(false).build();
    RMApp app2 = MockRMAppSubmitter.submit(rm1, data);
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
    // Reset queue metrics so the values asserted below come purely from the
    // recovery path of the second RM, not from the original scheduling.
    rm1.clearQueueMetrics(app1_1);
    rm1.clearQueueMetrics(app1_2);
    rm1.clearQueueMetrics(app2);
    // Mark QueueB STOPPED in the config the restarted RM will load; recovery
    // of its running app is expected to proceed regardless.
    csConf.set(PREFIX + "root.Default.QueueB.state", "STOPPED");
    // Second RM instance sharing rm1's state store (simulated restart).
    rm2 = new MockRM(csConf, rm1.getRMStateStore());
    rm2.start();
    nm1.setResourceTrackerService(rm2.getResourceTrackerService());
    nm2.setResourceTrackerService(rm2.getResourceTrackerService());
    // Re-register the NMs, reporting the containers they are still running so
    // the scheduler can recover them (AM + task container per attempt).
    List<NMContainerStatus> am1_1Containers = createNMContainerStatusForApp(am1_1);
    List<NMContainerStatus> am1_2Containers = createNMContainerStatusForApp(am1_2);
    am1_1Containers.addAll(am1_2Containers);
    nm1.registerNode(am1_1Containers, null);
    List<NMContainerStatus> am2Containers = createNMContainerStatusForApp(am2);
    nm2.registerNode(am2Containers, null);
    waitForNumContainersToRecover(2, rm2, am1_1.getApplicationAttemptId());
    waitForNumContainersToRecover(2, rm2, am1_2.getApplicationAttemptId());
    waitForNumContainersToRecover(2, rm2, am2.getApplicationAttemptId());
    // Expected post-recovery resource bookkeeping: cluster = 2 NMs, each
    // queue owns half the cluster; queue A holds 4 x 1GB containers and
    // queue B holds 2 x 1GB containers.
    Resource containerResource = Resource.newInstance(1024, 1);
    Resource nmResource = Resource.newInstance(nm1.getMemory(), nm1.getvCores());
    Resource clusterResource = Resources.multiply(nmResource, 2);
    Resource q1Resource = Resources.multiply(clusterResource, 0.5);
    Resource q2Resource = Resources.multiply(clusterResource, 0.5);
    Resource q1UsedResource = Resources.multiply(containerResource, 4);
    Resource q2UsedResource = Resources.multiply(containerResource, 2);
    Resource totalUsedResource = Resources.add(q1UsedResource, q2UsedResource);
    Resource q1availableResources = Resources.subtract(q1Resource, q1UsedResource);
    Resource q2availableResources = Resources.subtract(q2Resource, q2UsedResource);
    Resource totalAvailableResource = Resources.add(q1availableResources, q2availableResources);
    // Check the leaf queues and their metrics on the recovered scheduler.
    Map<ApplicationId, SchedulerApplication> schedulerApps = ((AbstractYarnScheduler) rm2.getResourceScheduler()).getSchedulerApplications();
    SchedulerApplication schedulerApp1_1 = schedulerApps.get(app1_1.getApplicationId());
    checkCSLeafQueue(rm2, schedulerApp1_1, clusterResource, q1Resource, q1UsedResource, 4);
    QueueMetrics queue1Metrics = schedulerApp1_1.getQueue().getMetrics();
    assertMetrics(queue1Metrics, 2, 0, 2, 0, 4, q1availableResources.getMemorySize(), q1availableResources.getVirtualCores(), q1UsedResource.getMemorySize(), q1UsedResource.getVirtualCores());
    SchedulerApplication schedulerApp2 = schedulerApps.get(app2.getApplicationId());
    checkCSLeafQueue(rm2, schedulerApp2, clusterResource, q2Resource, q2UsedResource, 2);
    QueueMetrics queue2Metrics = schedulerApp2.getQueue().getMetrics();
    assertMetrics(queue2Metrics, 1, 0, 1, 0, 2, q2availableResources.getMemorySize(), q2availableResources.getVirtualCores(), q2UsedResource.getMemorySize(), q2UsedResource.getVirtualCores());
    // The parent queue should aggregate both leaves: 6 containers used out of
    // a 16GB cluster (6/16 used-capacity ratio).
    LeafQueue leafQueue = (LeafQueue) schedulerApp2.getQueue();
    ParentQueue parentQueue = (ParentQueue) leafQueue.getParent();
    checkParentQueue(parentQueue, 6, totalUsedResource, (float) 6 / 16, (float) 6 / 16);
    assertMetrics(parentQueue.getMetrics(), 3, 0, 3, 0, 6, totalAvailableResource.getMemorySize(), totalAvailableResource.getVirtualCores(), totalUsedResource.getMemorySize(), totalUsedResource.getVirtualCores());
}
302438.572125hadoop
 void testRelationFiltersParsing() throws Exception {
    // Parses "relation" filter expressions of the form
    // type:entity[:entity...] joined by "," (AND), "OR"/"AND" between
    // parenthesized groups, and "!" for negation, and compares the parse
    // result against hand-built TimelineFilterList trees.
    // Simple comma-joined list: defaults to AND semantics.
    String expr = "type1:entity11,type2:entity21:entity22";
    TimelineFilterList expectedList = new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type1", Sets.newHashSet((Object) "entity11")), new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type2", Sets.newHashSet((Object) "entity21", "entity22")));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseRelationFilters(expr), expectedList);
    // Wrapping the same expression in parentheses must not change the result.
    expr = "(type1:entity11,type2:entity21:entity22)";
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseRelationFilters(expr), expectedList);
    // Two parenthesized groups combined with OR.
    expr = "(type1:entity11,type2:entity21:entity22) OR (type3:entity31:" + "entity32:entity33,type1:entity11:entity12)";
    expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type1", Sets.newHashSet((Object) "entity11")), new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type2", Sets.newHashSet((Object) "entity21", "entity22"))), new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type3", Sets.newHashSet((Object) "entity31", "entity32", "entity33")), new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type1", Sets.newHashSet((Object) "entity11", "entity12"))));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseRelationFilters(expr), expectedList);
    // "!" negates a whole group: every filter inside flips to NOT_EQUAL.
    expr = "!(type1:entity11,type2:entity21:entity22,type5:entity51) OR " + "(type3:entity31:entity32:entity33,type1:entity11:entity12)";
    expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type1", Sets.newHashSet((Object) "entity11")), new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type2", Sets.newHashSet((Object) "entity21", "entity22")), new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type5", Sets.newHashSet((Object) "entity51"))), new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type3", Sets.newHashSet((Object) "entity31", "entity32", "entity33")), new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type1", Sets.newHashSet((Object) "entity11", "entity12"))));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseRelationFilters(expr), expectedList);
    // Deeply nested mix of AND/OR/negation across several groups.
    expr = "(((!(type1:entity11,type2:entity21:entity22,type5:entity51) OR " + "(type3:entity31:entity32:entity33,type1:entity11:entity12)) AND " + "(!(type11:entity111) OR !(type4:entity43:entity44:entity47:entity49," + "type7:entity71))) OR ((type2:entity2,type8:entity88) AND t9:e:e1))";
    expectedList = new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type1", Sets.newHashSet((Object) "entity11")), new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type2", Sets.newHashSet((Object) "entity21", "entity22")), new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type5", Sets.newHashSet((Object) "entity51"))), new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type3", Sets.newHashSet((Object) "entity31", "entity32", "entity33")), new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type1", Sets.newHashSet((Object) "entity11", "entity12")))), new TimelineFilterList(Operator.OR, new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type11", Sets.newHashSet((Object) "entity111"))), new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type4", Sets.newHashSet((Object) "entity43", "entity44", "entity47", "entity49")), new TimelineKeyValuesFilter(TimelineCompareOp.NOT_EQUAL, "type7", Sets.newHashSet((Object) "entity71"))))), new TimelineFilterList(new TimelineFilterList(new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type2", Sets.newHashSet((Object) "entity2")), new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "type8", Sets.newHashSet((Object) "entity88"))), new TimelineKeyValuesFilter(TimelineCompareOp.EQUAL, "t9", Sets.newHashSet((Object) "e", "e1"))));
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseRelationFilters(expr), expectedList);
    // Same expression with arbitrary whitespace BETWEEN tokens: must parse
    // to the identical tree (whitespace around delimiters is ignored).
    expr = "   (   (  (   !   (   type1:entity11  ,  type2:entity21:entity22" + "  ,  type5:entity51  )   OR  (   type3:entity31:entity32:entity33  " + "     ,   type1:entity11:entity12)) AND (!(  type11:entity111  )  OR " + "    !   (   type4:entity43:entity44:entity47:entity49 , " + "type7:entity71  )  )  ) OR  (  (  type2:entity2 , type8:entity88) " + "AND  t9:e:e1 )    ) ";
    verifyFilterList(expr, TimelineReaderWebServicesUtils.parseRelationFilters(expr), expectedList);
    // Whitespace INSIDE a relation token ("type1 : entity11") is invalid.
    expr = "(((!(type1 : entity11,type2:entity21:entity22,type5:entity51) OR " + "(type3:entity31:entity32:entity33,type1:entity11:entity12)) AND " + "(!(type11:entity111) OR !(type4:entity43:entity44:entity47:entity49," + "type7:entity71))) OR ((type2:entity2,type8:entity88) AND t9:e:e1))";
    try {
        TimelineReaderWebServicesUtils.parseRelationFilters(expr);
        fail("Space not allowed in relation expression. Exception should have " + "been thrown");
    } catch (TimelineParseException e) {
        // Expected: parser rejects whitespace embedded in a relation token.
    }
}
302337.3316102hadoop
public void testGetEntitiesDataToRetrieve() throws Exception {
    // Exercises the "confstoretrieve" and "metricstoretrieve" query
    // parameters of the timeline reader REST endpoint: a comma-separated
    // prefix list selects which config keys / metric ids are returned, and
    // the "!(...)" form inverts the selection. Expected counts match the
    // fixture data loaded for application_1111111111_1111/type1.
    Client client = createClient();
    try {
        // Only configs whose key starts with "cfg_".
        URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/clusters/cluster1/apps/application_1111111111_1111/" + "entities/type1?confstoretrieve=cfg_");
        ClientResponse resp = getResponse(client, uri);
        Set<TimelineEntity> entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(2, entities.size());
        int cfgCnt = 0;
        for (TimelineEntity entity : entities) {
            cfgCnt += entity.getConfigs().size();
            for (String configKey : entity.getConfigs().keySet()) {
                assertTrue(configKey.startsWith("cfg_"));
            }
        }
        assertEquals(2, cfgCnt);
        // Multiple prefixes: keys starting with "cfg_" OR "config_".
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/clusters/cluster1/apps/application_1111111111_1111/" + "entities/type1?confstoretrieve=cfg_,config_");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(2, entities.size());
        cfgCnt = 0;
        for (TimelineEntity entity : entities) {
            cfgCnt += entity.getConfigs().size();
            for (String configKey : entity.getConfigs().keySet()) {
                assertTrue(configKey.startsWith("cfg_") || configKey.startsWith("config_"));
            }
        }
        assertEquals(5, cfgCnt);
        // Negated prefix list: everything EXCEPT "cfg_"/"config_" keys.
        // The fixture's only remaining key starts with "configuration_"
        // (which does not match the "config_" prefix).
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/clusters/cluster1/apps/application_1111111111_1111/" + "entities/type1?confstoretrieve=!(cfg_,config_)");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(2, entities.size());
        cfgCnt = 0;
        for (TimelineEntity entity : entities) {
            cfgCnt += entity.getConfigs().size();
            for (String configKey : entity.getConfigs().keySet()) {
                assertTrue(configKey.startsWith("configuration_"));
            }
        }
        assertEquals(1, cfgCnt);
        // Only metrics whose id starts with "MAP_".
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/clusters/cluster1/apps/application_1111111111_1111/" + "entities/type1?metricstoretrieve=MAP_");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(2, entities.size());
        int metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
            for (TimelineMetric metric : entity.getMetrics()) {
                assertTrue(metric.getId().startsWith("MAP_"));
            }
        }
        assertEquals(1, metricCnt);
        // Multiple metric prefixes: "MAP1_" OR "HDFS_".
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/clusters/cluster1/apps/application_1111111111_1111/" + "entities/type1?metricstoretrieve=MAP1_,HDFS_");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(2, entities.size());
        metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
            for (TimelineMetric metric : entity.getMetrics()) {
                assertTrue(metric.getId().startsWith("MAP1_") || metric.getId().startsWith("HDFS_"));
            }
        }
        assertEquals(3, metricCnt);
        // Negated metric prefixes: everything except "MAP1_"/"HDFS_";
        // per the fixture the remaining ids start with "MAP_" or "MAP11_".
        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" + "timeline/clusters/cluster1/apps/application_1111111111_1111/" + "entities/type1?metricstoretrieve=!(MAP1_,HDFS_)");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertNotNull(entities);
        assertEquals(2, entities.size());
        metricCnt = 0;
        for (TimelineEntity entity : entities) {
            metricCnt += entity.getMetrics().size();
            for (TimelineMetric metric : entity.getMetrics()) {
                assertTrue(metric.getId().startsWith("MAP_") || metric.getId().startsWith("MAP11_"));
            }
        }
        assertEquals(2, metricCnt);
    } finally {
        client.destroy();
    }
}
302823.034110hadoop
public void testReadAppsConfigFilters() throws Exception {
    // Reads YARN_APPLICATION entities with various config-filter lists and
    // checks both the number of matching entities and, where CONFIGS are
    // retrieved, the total number of config entries returned.
    // (cfg_param1=value1 AND cfg_param2=value2) OR
    // (cfg_param1=value3 AND config_param2=value2)
    TimelineFilterList list1 = new TimelineFilterList();
    list1.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "cfg_param1", "value1"));
    list1.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "cfg_param2", "value2"));
    TimelineFilterList list2 = new TimelineFilterList();
    list2.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "cfg_param1", "value3"));
    list2.addFilter(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "config_param2", "value2"));
    TimelineFilterList confFilterList = new TimelineFilterList(Operator.OR, list1, list2);
    Set<TimelineEntity> entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null));
    assertEquals(2, entities.size());
    assertEquals(5, totalConfigCount(entities));
    // Same filter with Field.ALL must return the same configs.
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(2, entities.size());
    assertEquals(5, totalConfigCount(entities));
    // NOT_EQUAL on an existing key matches only the app without that value.
    TimelineFilterList confFilterList1 = new TimelineFilterList(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "cfg_param1", "value1"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList1).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null));
    assertEquals(1, entities.size());
    assertEquals(3, totalConfigCount(entities));
    // Conjunction of two NOT_EQUAL filters excludes every app.
    TimelineFilterList confFilterList2 = new TimelineFilterList(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "cfg_param1", "value1"), new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "config_param2", "value2"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList2).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null));
    assertEquals(0, entities.size());
    // EQUAL on a key no app has matches nothing.
    TimelineFilterList confFilterList3 = new TimelineFilterList(new TimelineKeyValueFilter(TimelineCompareOp.EQUAL, "dummy_config", "value1"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList3).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null));
    assertEquals(0, entities.size());
    // NOT_EQUAL on a missing key with keyMustExist=true (default) matches
    // nothing, because the key is absent everywhere.
    TimelineFilterList confFilterList4 = new TimelineFilterList(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "dummy_config", "value1"));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList4).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null));
    assertEquals(0, entities.size());
    // Same filter with keyMustExist=false matches every app instead.
    TimelineFilterList confFilterList5 = new TimelineFilterList(new TimelineKeyValueFilter(TimelineCompareOp.NOT_EQUAL, "dummy_config", "value1", false));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().configFilters(confFilterList5).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS), null, null, null));
    assertEquals(3, entities.size());
}

/**
 * Sums the config entries across all returned entities. Extracted to avoid
 * repeating the counting loop for each filter variant above.
 */
private static int totalConfigCount(Set<TimelineEntity> entities) {
    int total = 0;
    for (TimelineEntity entity : entities) {
        total += entity.getConfigs().size();
    }
    return total;
}
302865.044110hadoop
public void testReadAppsMetricFilters() throws Exception {
    // Reads YARN_APPLICATION entities with various metric-filter lists and
    // checks the number of matching entities plus, where METRICS are
    // retrieved, the total number of metrics returned.
    // (MAP1_SLOT_MILLIS >= 50000000900) OR
    // (MAP_SLOT_MILLIS < 80000000000 AND MAP1_BYTES == 50)
    TimelineFilterList list1 = new TimelineFilterList();
    list1.addFilter(new TimelineCompareFilter(TimelineCompareOp.GREATER_OR_EQUAL, "MAP1_SLOT_MILLIS", 50000000900L));
    TimelineFilterList list2 = new TimelineFilterList();
    list2.addFilter(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "MAP_SLOT_MILLIS", 80000000000L));
    list2.addFilter(new TimelineCompareFilter(TimelineCompareOp.EQUAL, "MAP1_BYTES", 50));
    TimelineFilterList metricFilterList = new TimelineFilterList(Operator.OR, list1, list2);
    Set<TimelineEntity> entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
    assertEquals(2, entities.size());
    assertEquals(3, totalMetricCount(entities));
    // Same filter with Field.ALL must return the same metrics.
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(2, entities.size());
    assertEquals(3, totalMetricCount(entities));
    // Conjunction that only one app satisfies.
    TimelineFilterList metricFilterList1 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.LESS_OR_EQUAL, "MAP_SLOT_MILLIS", 80000000000L), new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "MAP1_BYTES", 30));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList1).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
    assertEquals(1, entities.size());
    assertEquals(2, totalMetricCount(entities));
    // Conjunction that no app satisfies.
    TimelineFilterList metricFilterList2 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.LESS_THAN, "MAP_SLOT_MILLIS", 40000000000L), new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "MAP1_BYTES", 30));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList2).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
    assertEquals(0, entities.size());
    // EQUAL on a metric no app has matches nothing.
    TimelineFilterList metricFilterList3 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.EQUAL, "dummy_metric", 5));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList3).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
    assertEquals(0, entities.size());
    // NOT_EQUAL on a missing metric with keyMustExist=true (default) also
    // matches nothing.
    TimelineFilterList metricFilterList4 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "dummy_metric", 5));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList4).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
    assertEquals(0, entities.size());
    // Same filter with keyMustExist=false matches every app instead.
    TimelineFilterList metricFilterList5 = new TimelineFilterList(new TimelineCompareFilter(TimelineCompareOp.NOT_EQUAL, "dummy_metric", 5, false));
    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1", "some_flow_name", 1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(), null), new TimelineEntityFilters.Builder().metricFilters(metricFilterList5).build(), new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), null, null, null));
    assertEquals(3, entities.size());
}

/**
 * Sums the metrics across all returned entities. Extracted to avoid
 * repeating the counting loop for each filter variant above.
 */
private static int totalMetricCount(Set<TimelineEntity> entities) {
    int total = 0;
    for (TimelineEntity entity : entities) {
        total += entity.getMetrics().size();
    }
    return total;
}
302713.595119kafka
public void testOffsetFetchV8AndAbove() throws Exception {
    // Round-trips OffsetFetch request/response messages through every
    // protocol version >= 8, the first version that supports fetching
    // offsets for multiple consumer groups in a single request.
    String groupOne = "group1";
    String groupTwo = "group2";
    String groupThree = "group3";
    String groupFour = "group4";
    String groupFive = "group5";
    String topic1 = "topic1";
    String topic2 = "topic2";
    String topic3 = "topic3";
    // Request-side topic/partition selections.
    OffsetFetchRequestTopics topicOne = new OffsetFetchRequestTopics().setName(topic1).setPartitionIndexes(Collections.singletonList(5));
    OffsetFetchRequestTopics topicTwo = new OffsetFetchRequestTopics().setName(topic2).setPartitionIndexes(Collections.singletonList(10));
    OffsetFetchRequestTopics topicThree = new OffsetFetchRequestTopics().setName(topic3).setPartitionIndexes(Collections.singletonList(15));
    // Groups 1-3 request explicit topic lists of increasing size; groups 4
    // and 5 pass null topics, i.e. "all topics for this group".
    List<OffsetFetchRequestTopics> groupOneTopics = singletonList(topicOne);
    OffsetFetchRequestGroup group1 = new OffsetFetchRequestGroup().setGroupId(groupOne).setTopics(groupOneTopics);
    List<OffsetFetchRequestTopics> groupTwoTopics = Arrays.asList(topicOne, topicTwo);
    OffsetFetchRequestGroup group2 = new OffsetFetchRequestGroup().setGroupId(groupTwo).setTopics(groupTwoTopics);
    List<OffsetFetchRequestTopics> groupThreeTopics = Arrays.asList(topicOne, topicTwo, topicThree);
    OffsetFetchRequestGroup group3 = new OffsetFetchRequestGroup().setGroupId(groupThree).setTopics(groupThreeTopics);
    OffsetFetchRequestGroup group4 = new OffsetFetchRequestGroup().setGroupId(groupFour).setTopics(null);
    OffsetFetchRequestGroup group5 = new OffsetFetchRequestGroup().setGroupId(groupFive).setTopics(null);
    // Round-trip the request with requireStable both true and false.
    OffsetFetchRequestData requestData = new OffsetFetchRequestData().setGroups(Arrays.asList(group1, group2, group3, group4, group5)).setRequireStable(true);
    testAllMessageRoundTripsOffsetFetchV8AndAbove(requestData);
    testAllMessageRoundTripsOffsetFetchV8AndAbove(requestData.setRequireStable(false));
    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version >= 8) {
            testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, requestData);
        }
    }
    // Response-side topics covering distinct partition-level error codes
    // and metadata values (including null metadata).
    OffsetFetchResponseTopics responseTopic1 = new OffsetFetchResponseTopics().setName(topic1).setPartitions(Collections.singletonList(new OffsetFetchResponsePartitions().setPartitionIndex(5).setMetadata(null).setCommittedOffset(100).setCommittedLeaderEpoch(3).setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())));
    OffsetFetchResponseTopics responseTopic2 = new OffsetFetchResponseTopics().setName(topic2).setPartitions(Collections.singletonList(new OffsetFetchResponsePartitions().setPartitionIndex(10).setMetadata("foo").setCommittedOffset(200).setCommittedLeaderEpoch(2).setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code())));
    OffsetFetchResponseTopics responseTopic3 = new OffsetFetchResponseTopics().setName(topic3).setPartitions(Collections.singletonList(new OffsetFetchResponsePartitions().setPartitionIndex(15).setMetadata("bar").setCommittedOffset(300).setCommittedLeaderEpoch(1).setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code())));
    // Group-level results with both error and success codes.
    OffsetFetchResponseGroup responseGroup1 = new OffsetFetchResponseGroup().setGroupId(groupOne).setTopics(Collections.singletonList(responseTopic1)).setErrorCode(Errors.NOT_COORDINATOR.code());
    OffsetFetchResponseGroup responseGroup2 = new OffsetFetchResponseGroup().setGroupId(groupTwo).setTopics(Arrays.asList(responseTopic1, responseTopic2)).setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code());
    OffsetFetchResponseGroup responseGroup3 = new OffsetFetchResponseGroup().setGroupId(groupThree).setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)).setErrorCode(Errors.NONE.code());
    OffsetFetchResponseGroup responseGroup4 = new OffsetFetchResponseGroup().setGroupId(groupFour).setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)).setErrorCode(Errors.NONE.code());
    OffsetFetchResponseGroup responseGroup5 = new OffsetFetchResponseGroup().setGroupId(groupFive).setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)).setErrorCode(Errors.NONE.code());
    // Supplier builds a fresh response per version, since the round-trip
    // helper may mutate the data it is given.
    Supplier<OffsetFetchResponseData> response = () -> new OffsetFetchResponseData().setGroups(Arrays.asList(responseGroup1, responseGroup2, responseGroup3, responseGroup4, responseGroup5)).setThrottleTimeMs(10);
    for (short version : ApiKeys.OFFSET_FETCH.allVersions()) {
        if (version >= 8) {
            OffsetFetchResponseData responseData = response.get();
            testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, responseData);
        }
    }
}
302960.2910106kafka
public static void main(String[] args) throws Exception {
    // Micro-benchmark of primitive operations: sqrt, clock reads, Random,
    // binary search, synchronized vs ReentrantLock under contention, and
    // map implementations. Each section prints an average ns/op figure.
    // Accumulator results (x, total, n, loc, counter) are printed so the
    // JIT cannot eliminate the measured work as dead code.
    final int iters = Integer.parseInt(args[0]);
    double x = 0.0;
    long start = System.nanoTime();
    for (int i = 0; i < iters; i++) x += Math.sqrt(x);
    System.out.println(x);
    System.out.println("sqrt: " + (System.nanoTime() - start) / (double) iters);
    // Warm up the clock-reading loops before timing them.
    systemMillis(iters);
    systemNanos(iters);
    long total = 0;
    start = System.nanoTime();
    total += systemMillis(iters);
    // Divide as double everywhere (the sqrt report above already did) so
    // sub-nanosecond averages are not truncated to 0 by integer division.
    System.out.println("System.currentTimeMillis(): " + (System.nanoTime() - start) / (double) iters);
    start = System.nanoTime();
    total += systemNanos(iters);
    System.out.println("System.nanoTime(): " + (System.nanoTime() - start) / (double) iters);
    System.out.println(total);
    int n = 0;
    Random random = new Random();
    start = System.nanoTime();
    for (int i = 0; i < iters; i++) {
        n += random.nextInt();
    }
    System.out.println(n);
    System.out.println("random: " + (System.nanoTime() - start) / (double) iters);
    float[] floats = new float[1024];
    for (int i = 0; i < floats.length; i++) floats[i] = random.nextFloat();
    Arrays.sort(floats);
    int loc = 0;
    start = System.nanoTime();
    for (int i = 0; i < iters; i++) loc += Arrays.binarySearch(floats, floats[i % floats.length]);
    System.out.println(loc);
    System.out.println("binary search: " + (System.nanoTime() - start) / (double) iters);
    // Contended-lock benchmarks: one thread hammers the lock while a second
    // thread periodically acquires it, to measure cost under contention.
    final Time time = Time.SYSTEM;
    final AtomicBoolean done = new AtomicBoolean(false);
    final Object lock = new Object();
    Thread t1 = new Thread(() -> {
        time.sleep(1);
        int counter = 0;
        long syncStart = time.nanoseconds();
        for (int i = 0; i < iters; i++) {
            synchronized (lock) {
                counter++;
            }
        }
        System.out.println("synchronized: " + ((time.nanoseconds() - syncStart) / (double) iters));
        System.out.println(counter);
        done.set(true);
    });
    Thread t2 = new Thread(() -> {
        int counter = 0;
        while (!done.get()) {
            time.sleep(1);
            synchronized (lock) {
                counter += 1;
            }
        }
        System.out.println("Counter: " + counter);
    });
    t1.start();
    t2.start();
    t1.join();
    t2.join();
    System.out.println("Testing locks");
    done.set(false);
    final ReentrantLock lock2 = new ReentrantLock();
    Thread t3 = new Thread(() -> {
        time.sleep(1);
        int counter = 0;
        long lockStart = time.nanoseconds();
        for (int i = 0; i < iters; i++) {
            lock2.lock();
            counter++;
            lock2.unlock();
        }
        System.out.println("lock: " + ((time.nanoseconds() - lockStart) / (double) iters));
        System.out.println(counter);
        done.set(true);
    });
    Thread t4 = new Thread(() -> {
        int counter = 0;
        while (!done.get()) {
            time.sleep(1);
            lock2.lock();
            counter++;
            lock2.unlock();
        }
        System.out.println("Counter: " + counter);
    });
    t3.start();
    t4.start();
    t3.join();
    t4.join();
    // Map read benchmarks over the same 100-entry contents.
    Map<String, Integer> values = new HashMap<>();
    for (int i = 0; i < 100; i++) values.put(Integer.toString(i), i);
    System.out.println("HashMap:");
    benchMap(2, 1000000, values);
    System.out.println("ConcurrentHashMap:");
    benchMap(2, 1000000, new ConcurrentHashMap<>(values));
    System.out.println("CopyOnWriteMap:");
    benchMap(2, 1000000, new CopyOnWriteMap<>(values));
}
303442.058103kafka
/**
 * End-to-end MirrorMaker 2 replication test: produces to one (or both) clusters,
 * verifies topic/record/heartbeat/checkpoint replication, exercises consumer
 * failover (and failback when {@code replicateBackupToPrimary} is set), and
 * finally checks that newly created topics are picked up and replicated.
 *
 * <p>Fix: the original closed {@code primaryClient} and {@code backupClient}
 * before the failback section, which still calls
 * {@code waitForCheckpointOnAllPartitions(primaryClient, ...)} — a use of a
 * closed client. The {@code close()} calls are now performed only after the
 * last use of either client.
 */
public void testReplication() throws Exception {
    // Seed the primary cluster (and the backup cluster when replicating both ways).
    produceMessages(primaryProducer, "test-topic-1");
    String backupTopic1 = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS);
    if (replicateBackupToPrimary) {
        produceMessages(backupProducer, "test-topic-1");
    }
    String reverseTopic1 = remoteTopicName("test-topic-1", BACKUP_CLUSTER_ALIAS);
    String consumerGroupName = "consumer-group-testReplication";
    Map<String, Object> consumerProps = Collections.singletonMap("group.id", consumerGroupName);
    // Consume once so the group exists before MM2 starts checkpointing it.
    warmUpConsumer(consumerProps);
    mm2Config = new MirrorMakerConfig(mm2Props);
    waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS);
    // In one-way mode the primary-side MM2 only needs the heartbeat connector.
    List<Class<? extends Connector>> primaryConnectors = replicateBackupToPrimary ? CONNECTOR_LIST : Collections.singletonList(MirrorHeartbeatConnector.class);
    waitUntilMirrorMakerIsRunning(primary, primaryConnectors, mm2Config, BACKUP_CLUSTER_ALIAS, PRIMARY_CLUSTER_ALIAS);
    MirrorClient primaryClient = new MirrorClient(mm2Config.clientConfig(PRIMARY_CLUSTER_ALIAS));
    MirrorClient backupClient = new MirrorClient(mm2Config.clientConfig(BACKUP_CLUSTER_ALIAS));
    // Remote topics and the offset-sync topic must exist before consuming from them.
    waitForTopicCreated(primary, reverseTopic1);
    waitForTopicCreated(backup, backupTopic1);
    waitForTopicCreated(primary, "mm2-offset-syncs.backup.internal");
    assertEquals(TopicConfig.CLEANUP_POLICY_COMPACT, getTopicConfig(backup.kafka(), backupTopic1, TopicConfig.CLEANUP_POLICY_CONFIG), "topic config was not synced");
    createAndTestNewTopicWithConfigFilter();
    // Record counts: each cluster has its own local records plus the replicated ones.
    assertEquals(NUM_RECORDS_PRODUCED, primary.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "test-topic-1").count(), "Records were not produced to primary cluster.");
    assertEquals(NUM_RECORDS_PRODUCED, backup.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, backupTopic1).count(), "Records were not replicated to backup cluster.");
    assertEquals(NUM_RECORDS_PRODUCED, backup.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, "test-topic-1").count(), "Records were not produced to backup cluster.");
    if (replicateBackupToPrimary) {
        assertEquals(NUM_RECORDS_PRODUCED, primary.kafka().consume(NUM_RECORDS_PRODUCED, RECORD_TRANSFER_DURATION_MS, reverseTopic1).count(), "Records were not replicated to primary cluster.");
        assertEquals(NUM_RECORDS_PRODUCED * 2, primary.kafka().consume(NUM_RECORDS_PRODUCED * 2, RECORD_TRANSFER_DURATION_MS, reverseTopic1, "test-topic-1").count(), "Primary cluster doesn't have all records from both clusters.");
        assertEquals(NUM_RECORDS_PRODUCED * 2, backup.kafka().consume(NUM_RECORDS_PRODUCED * 2, RECORD_TRANSFER_DURATION_MS, backupTopic1, "test-topic-1").count(), "Backup cluster doesn't have all records from both clusters.");
    }
    // Heartbeats are emitted locally and replicated to the remote cluster.
    assertTrue(primary.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "heartbeats").count() > 0, "Heartbeats were not emitted to primary cluster.");
    assertTrue(backup.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "heartbeats").count() > 0, "Heartbeats were not emitted to backup cluster.");
    assertTrue(backup.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "primary.heartbeats").count() > 0, "Heartbeats were not replicated downstream to backup cluster.");
    if (replicateBackupToPrimary) {
        assertTrue(primary.kafka().consume(1, RECORD_TRANSFER_DURATION_MS, "backup.heartbeats").count() > 0, "Heartbeats were not replicated downstream to primary cluster.");
    }
    assertTrue(backupClient.upstreamClusters().contains(PRIMARY_CLUSTER_ALIAS), "Did not find upstream primary cluster.");
    assertEquals(1, backupClient.replicationHops(PRIMARY_CLUSTER_ALIAS), "Did not calculate replication hops correctly.");
    assertTrue(backup.kafka().consume(1, CHECKPOINT_DURATION_MS, "primary.checkpoints.internal").count() > 0, "Checkpoints were not emitted downstream to backup cluster.");
    if (replicateBackupToPrimary) {
        assertTrue(primaryClient.upstreamClusters().contains(BACKUP_CLUSTER_ALIAS), "Did not find upstream backup cluster.");
        assertEquals(1, primaryClient.replicationHops(BACKUP_CLUSTER_ALIAS), "Did not calculate replication hops correctly.");
        assertTrue(primary.kafka().consume(1, CHECKPOINT_DURATION_MS, "backup.checkpoints.internal").count() > 0, "Checkpoints were not emitted upstream to primary cluster.");
    }
    // Failover: seek a backup-cluster consumer to the checkpointed offsets and
    // verify it lands strictly between 0 and the produced record count.
    Map<TopicPartition, OffsetAndMetadata> backupOffsets = waitForCheckpointOnAllPartitions(backupClient, consumerGroupName, PRIMARY_CLUSTER_ALIAS, backupTopic1);
    try (Consumer<byte[], byte[]> primaryConsumer = backup.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) {
        primaryConsumer.assign(backupOffsets.keySet());
        backupOffsets.forEach(primaryConsumer::seek);
        primaryConsumer.poll(CONSUMER_POLL_TIMEOUT_MS);
        primaryConsumer.commitAsync();
        assertTrue(primaryConsumer.position(new TopicPartition(backupTopic1, 0)) > 0, "Consumer failedover to zero offset.");
        assertTrue(primaryConsumer.position(new TopicPartition(backupTopic1, 0)) <= NUM_RECORDS_PRODUCED, "Consumer failedover beyond expected offset.");
    }
    assertMonotonicCheckpoints(backup, "primary.checkpoints.internal");
    if (replicateBackupToPrimary) {
        // Failback: the same check in the opposite direction, using primaryClient.
        Map<TopicPartition, OffsetAndMetadata> primaryOffsets = waitForCheckpointOnAllPartitions(primaryClient, consumerGroupName, BACKUP_CLUSTER_ALIAS, reverseTopic1);
        try (Consumer<byte[], byte[]> primaryConsumer = primary.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) {
            primaryConsumer.assign(primaryOffsets.keySet());
            primaryOffsets.forEach(primaryConsumer::seek);
            primaryConsumer.poll(CONSUMER_POLL_TIMEOUT_MS);
            primaryConsumer.commitAsync();
            assertTrue(primaryConsumer.position(new TopicPartition(reverseTopic1, 0)) > 0, "Consumer failedback to zero downstream offset.");
            assertTrue(primaryConsumer.position(new TopicPartition(reverseTopic1, 0)) <= NUM_RECORDS_PRODUCED, "Consumer failedback beyond expected downstream offset.");
        }
    }
    // Close only after the last use of either client (primaryClient is used in the
    // failback block above; the original code closed it before that use).
    primaryClient.close();
    backupClient.close();
    // Newly created topics must be detected and replicated in both directions.
    primary.kafka().createTopic("test-topic-2", NUM_PARTITIONS);
    String backupTopic2 = remoteTopicName("test-topic-2", PRIMARY_CLUSTER_ALIAS);
    waitForTopicCreated(backup, backupTopic2);
    produceMessages(primaryProducer, "test-topic-2", 1);
    assertEquals(NUM_RECORDS_PER_PARTITION, primary.kafka().consume(NUM_RECORDS_PER_PARTITION, RECORD_TRANSFER_DURATION_MS, "test-topic-2").count(), "Records were not produced to primary cluster.");
    assertEquals(NUM_RECORDS_PER_PARTITION, backup.kafka().consume(NUM_RECORDS_PER_PARTITION, 2 * RECORD_TRANSFER_DURATION_MS, backupTopic2).count(), "New topic was not replicated to backup cluster.");
    if (replicateBackupToPrimary) {
        backup.kafka().createTopic("test-topic-3", NUM_PARTITIONS);
        String reverseTopic3 = remoteTopicName("test-topic-3", BACKUP_CLUSTER_ALIAS);
        waitForTopicCreated(primary, reverseTopic3);
        produceMessages(backupProducer, "test-topic-3", 1);
        assertEquals(NUM_RECORDS_PER_PARTITION, backup.kafka().consume(NUM_RECORDS_PER_PARTITION, RECORD_TRANSFER_DURATION_MS, "test-topic-3").count(), "Records were not produced to backup cluster.");
        assertEquals(NUM_RECORDS_PER_PARTITION, primary.kafka().consume(NUM_RECORDS_PER_PARTITION, 2 * RECORD_TRANSFER_DURATION_MS, reverseTopic3).count(), "New topic was not replicated to primary cluster.");
    }
}
301887.7816107kafka
/**
 * Emits Java source into {@code buffer} that deserializes a variable-length
 * field (string, bytes, records, or array) from {@code _readable}.
 *
 * The generated code first reads a length: in flexible versions it is an
 * unsigned varint minus one (so 0 encodes null as -1); in non-flexible
 * versions it is a signed short for strings or a signed int for
 * bytes/arrays/records. A negative length either assigns null (when the field
 * is nullable in these versions) or throws. For arrays, element reads recurse
 * into this method for string/bytes elements.
 *
 * @param fieldFlexibleVersions versions in which the field uses the compact
 *                              (varint-length) encoding
 * @param name                  field name, used only in generated error messages
 * @param type                  the field's wire type
 * @param possibleVersions      versions the generated branch must cover
 * @param nullableVersions      versions in which a null value is legal
 * @param assignmentPrefix      code emitted before the parsed value expression
 * @param assignmentSuffix      code emitted after it (together they form the
 *                              assignment/consumer of the value)
 * @param isStructArrayWithKeys whether an array of keyed structs should use the
 *                              implicit linked-hash multi-collection type
 * @param zeroCopy              whether bytes fields are exposed as ByteBuffer
 *                              views instead of copied byte arrays
 */
private void generateVariableLengthReader(Versions fieldFlexibleVersions, String name, FieldType type, Versions possibleVersions, Versions nullableVersions, String assignmentPrefix, String assignmentSuffix, boolean isStructArrayWithKeys, boolean zeroCopy) {
    String lengthVar = type.isArray() ? "arrayLength" : "length";
    buffer.printf("int %s;%n", lengthVar);
    // Flexible versions: length is encoded as unsignedVarint(len + 1), so -1 after
    // the subtraction means null.
    VersionConditional.forVersions(fieldFlexibleVersions, possibleVersions).ifMember(__ -> {
        buffer.printf("%s = _readable.readUnsignedVarint() - 1;%n", lengthVar);
    }).ifNotMember(__ -> {
        // Non-flexible versions: fixed-width length prefix.
        if (type.isString()) {
            buffer.printf("%s = _readable.readShort();%n", lengthVar);
        } else if (type.isBytes() || type.isArray() || type.isRecords()) {
            buffer.printf("%s = _readable.readInt();%n", lengthVar);
        } else {
            throw new RuntimeException("Can't handle variable length type " + type);
        }
    }).generate(buffer);
    // Negative length: null if permitted in these versions, otherwise an error.
    buffer.printf("if (%s < 0) {%n", lengthVar);
    buffer.incrementIndent();
    VersionConditional.forVersions(nullableVersions, possibleVersions).ifNotMember(__ -> {
        buffer.printf("throw new RuntimeException(\"non-nullable field %s " + "was serialized as null\");%n", name);
    }).ifMember(__ -> {
        buffer.printf("%snull%s", assignmentPrefix, assignmentSuffix);
    }).generate(buffer);
    buffer.decrementIndent();
    // Strings are capped at 0x7fff so the length always fits the short prefix.
    if (type.isString()) {
        buffer.printf("} else if (%s > 0x7fff) {%n", lengthVar);
        buffer.incrementIndent();
        buffer.printf("throw new RuntimeException(\"string field %s " + "had invalid length \" + %s);%n", name, lengthVar);
        buffer.decrementIndent();
    }
    buffer.printf("} else {%n");
    buffer.incrementIndent();
    if (type.isString()) {
        buffer.printf("%s_readable.readString(%s)%s", assignmentPrefix, lengthVar, assignmentSuffix);
    } else if (type.isBytes()) {
        // zeroCopy exposes the underlying buffer; otherwise the bytes are copied out.
        if (zeroCopy) {
            buffer.printf("%s_readable.readByteBuffer(%s)%s", assignmentPrefix, lengthVar, assignmentSuffix);
        } else {
            buffer.printf("byte[] newBytes = _readable.readArray(%s);%n", lengthVar);
            buffer.printf("%snewBytes%s", assignmentPrefix, assignmentSuffix);
        }
    } else if (type.isRecords()) {
        buffer.printf("%s_readable.readRecords(%s)%s", assignmentPrefix, lengthVar, assignmentSuffix);
    } else if (type.isArray()) {
        FieldType.ArrayType arrayType = (FieldType.ArrayType) type;
        // Guard against a corrupt length claiming more elements than bytes remain,
        // which would otherwise pre-allocate a huge collection.
        buffer.printf("if (%s > _readable.remaining()) {%n", lengthVar);
        buffer.incrementIndent();
        buffer.printf("throw new RuntimeException(\"Tried to allocate a collection of size \" + %s + \", but " + "there are only \" + _readable.remaining() + \" bytes remaining.\");%n", lengthVar);
        buffer.decrementIndent();
        buffer.printf("}%n");
        if (isStructArrayWithKeys) {
            // Keyed struct arrays use the multi-collection so elements are addressable by key.
            headerGenerator.addImport(MessageGenerator.IMPLICIT_LINKED_HASH_MULTI_COLLECTION_CLASS);
            buffer.printf("%s newCollection = new %s(%s);%n", FieldSpec.collectionType(arrayType.elementType().toString()), FieldSpec.collectionType(arrayType.elementType().toString()), lengthVar);
        } else {
            headerGenerator.addImport(MessageGenerator.ARRAYLIST_CLASS);
            String boxedArrayType = arrayType.elementType().getBoxedJavaType(headerGenerator);
            buffer.printf("ArrayList<%s> newCollection = new ArrayList<>(%s);%n", boxedArrayType, lengthVar);
        }
        buffer.printf("for (int i = 0; i < %s; i++) {%n", lengthVar);
        buffer.incrementIndent();
        if (arrayType.elementType().isArray()) {
            throw new RuntimeException("Nested arrays are not supported.  " + "Use an array of structures containing another array.");
        } else if (arrayType.elementType().isBytes() || arrayType.elementType().isString()) {
            // Recurse for variable-length elements; element nullability is NONE here.
            generateVariableLengthReader(fieldFlexibleVersions, name + " element", arrayType.elementType(), possibleVersions, Versions.NONE, "newCollection.add(", String.format(");%n"), false, false);
        } else {
            buffer.printf("newCollection.add(%s);%n", primitiveReadExpression(arrayType.elementType()));
        }
        buffer.decrementIndent();
        buffer.printf("}%n");
        buffer.printf("%snewCollection%s", assignmentPrefix, assignmentSuffix);
    } else {
        throw new RuntimeException("Can't handle variable length type " + type);
    }
    buffer.decrementIndent();
    buffer.printf("}%n");
}
302502.2913105kafka
/**
 * Asserts that two {@code ApiMessageAndVersion} records are equal, normalizing
 * the ordering of list fields for record types whose lists carry no ordering
 * guarantee before comparing. Falls back to plain {@code assertEquals} for any
 * other message type.
 *
 * NOTE(review): each branch dispatches on {@code actual.message()}'s type but
 * casts {@code expected.message()} without an instanceof check — a type
 * mismatch surfaces as a ClassCastException rather than a clean assertion
 * failure. Also, the first branch sorts nothing itself (it relies on
 * fromTopicPartitions for order-insensitivity — presumably a set/map
 * conversion; confirm against that helper).
 */
private static void assertApiMessageAndVersionEquals(ApiMessageAndVersion expected, ApiMessageAndVersion actual) {
    // Same reference (including both null) is trivially equal.
    if (expected == actual)
        return;
    assertEquals(expected.version(), actual.version());
    if (actual.message() instanceof ConsumerGroupCurrentMemberAssignmentValue) {
        // Compare field-by-field; partition lists are normalized via fromTopicPartitions.
        ConsumerGroupCurrentMemberAssignmentValue expectedValue = (ConsumerGroupCurrentMemberAssignmentValue) expected.message();
        ConsumerGroupCurrentMemberAssignmentValue actualValue = (ConsumerGroupCurrentMemberAssignmentValue) actual.message();
        assertEquals(expectedValue.memberEpoch(), actualValue.memberEpoch());
        assertEquals(expectedValue.previousMemberEpoch(), actualValue.previousMemberEpoch());
        assertEquals(fromTopicPartitions(expectedValue.assignedPartitions()), fromTopicPartitions(actualValue.assignedPartitions()));
        assertEquals(fromTopicPartitions(expectedValue.partitionsPendingRevocation()), fromTopicPartitions(actualValue.partitionsPendingRevocation()));
    } else if (actual.message() instanceof ConsumerGroupPartitionMetadataValue) {
        // duplicate() before sorting so the callers' messages are not mutated in place.
        ConsumerGroupPartitionMetadataValue expectedValue = (ConsumerGroupPartitionMetadataValue) expected.message().duplicate();
        ConsumerGroupPartitionMetadataValue actualValue = (ConsumerGroupPartitionMetadataValue) actual.message().duplicate();
        List<ConsumerGroupPartitionMetadataValue.TopicMetadata> expectedTopicMetadataList = expectedValue.topics();
        List<ConsumerGroupPartitionMetadataValue.TopicMetadata> actualTopicMetadataList = actualValue.topics();
        if (expectedTopicMetadataList.size() != actualTopicMetadataList.size()) {
            fail("Topic metadata lists have different sizes");
        }
        // Sort both sides by topic id so positional comparison below is order-insensitive.
        expectedTopicMetadataList.sort(Comparator.comparing(ConsumerGroupPartitionMetadataValue.TopicMetadata::topicId));
        actualTopicMetadataList.sort(Comparator.comparing(ConsumerGroupPartitionMetadataValue.TopicMetadata::topicId));
        for (int i = 0; i < expectedTopicMetadataList.size(); i++) {
            ConsumerGroupPartitionMetadataValue.TopicMetadata expectedTopicMetadata = expectedTopicMetadataList.get(i);
            ConsumerGroupPartitionMetadataValue.TopicMetadata actualTopicMetadata = actualTopicMetadataList.get(i);
            assertEquals(expectedTopicMetadata.topicId(), actualTopicMetadata.topicId());
            assertEquals(expectedTopicMetadata.topicName(), actualTopicMetadata.topicName());
            assertEquals(expectedTopicMetadata.numPartitions(), actualTopicMetadata.numPartitions());
            List<ConsumerGroupPartitionMetadataValue.PartitionMetadata> expectedPartitionMetadataList = expectedTopicMetadata.partitionMetadata();
            List<ConsumerGroupPartitionMetadataValue.PartitionMetadata> actualPartitionMetadataList = actualTopicMetadata.partitionMetadata();
            if (expectedPartitionMetadataList.size() != actualPartitionMetadataList.size()) {
                fail("Partition metadata lists have different sizes");
            } else if (!expectedPartitionMetadataList.isEmpty() && !actualPartitionMetadataList.isEmpty()) {
                // Racks within a partition are compared as unordered collections.
                for (int j = 0; j < expectedPartitionMetadataList.size(); j++) {
                    ConsumerGroupPartitionMetadataValue.PartitionMetadata expectedPartitionMetadata = expectedPartitionMetadataList.get(j);
                    ConsumerGroupPartitionMetadataValue.PartitionMetadata actualPartitionMetadata = actualPartitionMetadataList.get(j);
                    assertEquals(expectedPartitionMetadata.partition(), actualPartitionMetadata.partition());
                    assertUnorderedListEquals(expectedPartitionMetadata.racks(), actualPartitionMetadata.racks());
                }
            }
        }
    } else if (actual.message() instanceof GroupMetadataValue) {
        GroupMetadataValue expectedValue = (GroupMetadataValue) expected.message().duplicate();
        GroupMetadataValue actualValue = (GroupMetadataValue) actual.message().duplicate();
        Comparator<GroupMetadataValue.MemberMetadata> comparator = Comparator.comparing(GroupMetadataValue.MemberMetadata::memberId);
        expectedValue.members().sort(comparator);
        actualValue.members().sort(comparator);
        try {
            // Subscription/assignment are serialized byte arrays; deserialize, sort their
            // topic/partition lists, then re-serialize (preserving the original protocol
            // version) so the final equals() compares order-normalized bytes.
            Arrays.asList(expectedValue, actualValue).forEach(value -> value.members().forEach(memberMetadata -> {
                ConsumerPartitionAssignor.Subscription subscription = ConsumerProtocol.deserializeSubscription(ByteBuffer.wrap(memberMetadata.subscription()));
                subscription.topics().sort(String::compareTo);
                subscription.ownedPartitions().sort(Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition));
                memberMetadata.setSubscription(Utils.toArray(ConsumerProtocol.serializeSubscription(subscription, ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(memberMetadata.subscription())))));
                ConsumerPartitionAssignor.Assignment assignment = ConsumerProtocol.deserializeAssignment(ByteBuffer.wrap(memberMetadata.assignment()));
                assignment.partitions().sort(Comparator.comparing(TopicPartition::topic).thenComparing(TopicPartition::partition));
                memberMetadata.setAssignment(Utils.toArray(ConsumerProtocol.serializeAssignment(assignment, ConsumerProtocol.deserializeVersion(ByteBuffer.wrap(memberMetadata.assignment())))));
            }));
        } catch (SchemaException ex) {
            fail("Failed deserialization: " + ex.getMessage());
        }
        assertEquals(expectedValue, actualValue);
    } else if (actual.message() instanceof ConsumerGroupTargetAssignmentMemberValue) {
        // Normalize topic-partition ordering by topic id, then compare whole values.
        ConsumerGroupTargetAssignmentMemberValue expectedValue = (ConsumerGroupTargetAssignmentMemberValue) expected.message().duplicate();
        ConsumerGroupTargetAssignmentMemberValue actualValue = (ConsumerGroupTargetAssignmentMemberValue) actual.message().duplicate();
        Comparator<ConsumerGroupTargetAssignmentMemberValue.TopicPartition> comparator = Comparator.comparing(ConsumerGroupTargetAssignmentMemberValue.TopicPartition::topicId);
        expectedValue.topicPartitions().sort(comparator);
        actualValue.topicPartitions().sort(comparator);
        assertEquals(expectedValue, actualValue);
    } else {
        assertEquals(expected.message(), actual.message());
    }
}
301993.541127kafka
/**
 * Verifies that a consumer group's subscription metadata tracks member
 * subscriptions as members are added one by one: foo, then bar, then zar,
 * each backed by a topic with 1, 2 and 3 partitions respectively.
 *
 * NOTE(review): judging by the assertion pattern, the two arguments of
 * computeSubscribedTopicNames appear to be (memberRemoved, memberAdded) —
 * passing a member as the first argument excludes its subscription, passing
 * it as the second includes it before updateMember is called. Confirm against
 * that method's javadoc.
 */
public void testUpdateSubscriptionMetadata() {
    Uuid fooTopicId = Uuid.randomUuid();
    Uuid barTopicId = Uuid.randomUuid();
    Uuid zarTopicId = Uuid.randomUuid();
    // Metadata image with three topics of increasing partition counts plus rack info.
    MetadataImage image = new MetadataImageBuilder().addTopic(fooTopicId, "foo", 1).addTopic(barTopicId, "bar", 2).addTopic(zarTopicId, "zar", 3).addRacks().build();
    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member1").setSubscribedTopicNames(Collections.singletonList("foo")).build();
    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member2").setSubscribedTopicNames(Collections.singletonList("bar")).build();
    ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member3").setSubscribedTopicNames(Collections.singletonList("zar")).build();
    ConsumerGroup consumerGroup = createConsumerGroup("group-foo");
    // Empty group: no subscription metadata.
    assertEquals(Collections.emptyMap(), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(null, null), image.topics(), image.cluster()));
    // Simulating member1 joining yields foo's metadata even before updateMember.
    assertEquals(mkMap(mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(null, member1), image.topics(), image.cluster()));
    consumerGroup.updateMember(member1);
    assertEquals(mkMap(mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(null, null), image.topics(), image.cluster()));
    // Simulating member1 leaving brings the metadata back to empty.
    assertEquals(Collections.emptyMap(), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(member1, null), image.topics(), image.cluster()));
    // member2 joining adds bar alongside foo.
    assertEquals(mkMap(mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))), mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(null, member2), image.topics(), image.cluster()));
    consumerGroup.updateMember(member2);
    assertEquals(mkMap(mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))), mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(null, null), image.topics(), image.cluster()));
    // Removing member2 leaves only foo; removing member1 leaves only bar.
    assertEquals(mkMap(mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(member2, null), image.topics(), image.cluster()));
    assertEquals(mkMap(mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(member1, null), image.topics(), image.cluster()));
    // member3 joining adds zar; after updateMember all three topics are present.
    assertEquals(mkMap(mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))), mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))), mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(null, member3), image.topics(), image.cluster()));
    consumerGroup.updateMember(member3);
    assertEquals(mkMap(mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))), mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))), mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))), consumerGroup.computeSubscriptionMetadata(consumerGroup.computeSubscribedTopicNames(null, null), image.topics(), image.cluster()));
}
302425.251124kafka
/**
 * Verifies the lifecycle of the rebalance timeout for a consumer-group member:
 * the timeout is scheduled when the member enters a revocation phase
 * (heartbeat with RebalanceTimeoutMs of 12000), and cancelled once the member
 * acknowledges its reduced assignment; firing the stale timeout afterwards
 * produces no records.
 *
 * Change: the assignment maps are now populated explicitly instead of using
 * double-brace initialization, which creates an anonymous inner class that
 * captures the enclosing test instance.
 */
public void testRebalanceTimeoutLifecycle() {
    String groupId = "fooup";
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Context with a single 3-partition topic and rack metadata.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().withAssignors(Collections.singletonList(assignor)).withMetadataImage(new MetadataImageBuilder().addTopic(fooTopicId, fooTopicName, 3).addRacks().build()).build();
    // Member 1 initially owns all three partitions.
    HashMap<String, MemberAssignment> firstAssignment = new HashMap<>();
    firstAssignment.put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1, 2))));
    assignor.prepareGroupAssignment(new GroupAssignment(firstAssignment));
    // Member 1 joins with a 180s rebalance timeout and receives the full assignment at epoch 1.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId1).setMemberEpoch(0).setRebalanceTimeoutMs(180000).setSubscribedTopicNames(Collections.singletonList("foo")).setTopicPartitions(Collections.emptyList()));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId1).setMemberEpoch(1).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(0, 1, 2))))), result.response());
    assertEquals(Collections.emptyList(), context.sleep(result.response().heartbeatIntervalMs()));
    // Rebalance target: partitions 0-1 stay on member 1; partition 2 moves to member 2.
    HashMap<String, MemberAssignment> secondAssignment = new HashMap<>();
    secondAssignment.put(memberId1, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 0, 1))));
    secondAssignment.put(memberId2, new MemberAssignment(mkAssignment(mkTopicAssignment(fooTopicId, 2))));
    assignor.prepareGroupAssignment(new GroupAssignment(secondAssignment));
    // Member 2 joins, bumping the group to epoch 2; its assignment is empty until member 1 revokes.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId2).setMemberEpoch(0).setRebalanceTimeoutMs(90000).setSubscribedTopicNames(Collections.singletonList("foo")).setTopicPartitions(Collections.emptyList()));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId2).setMemberEpoch(2).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()), result.response());
    assertEquals(Collections.emptyList(), context.sleep(result.response().heartbeatIntervalMs()));
    // Member 1 heartbeats with a new 12s rebalance timeout and is told to shrink to partitions 0-1.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId1).setMemberEpoch(1).setRebalanceTimeoutMs(12000).setSubscribedTopicNames(Collections.singletonList("foo")));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId1).setMemberEpoch(1).setHeartbeatIntervalMs(5000).setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment().setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatResponseData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(0, 1))))), result.response());
    // The revocation deadline (12s) is now scheduled for member 1.
    ScheduledTimeout<Void, CoordinatorRecord> scheduledTimeout = context.assertRebalanceTimeout(groupId, memberId1, 12000);
    assertEquals(Collections.emptyList(), context.sleep(result.response().heartbeatIntervalMs()));
    // Member 1 acknowledges owning only partitions 0-1, advancing to epoch 2 and
    // cancelling the rebalance timeout.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData().setGroupId(groupId).setMemberId(memberId1).setMemberEpoch(1).setTopicPartitions(Collections.singletonList(new ConsumerGroupHeartbeatRequestData.TopicPartitions().setTopicId(fooTopicId).setPartitions(Arrays.asList(0, 1)))));
    assertResponseEquals(new ConsumerGroupHeartbeatResponseData().setMemberId(memberId1).setMemberEpoch(2).setHeartbeatIntervalMs(5000), result.response());
    context.assertNoRebalanceTimeout(groupId, memberId1);
    // Executing the stale timeout operation must be a no-op.
    assertEquals(Collections.emptyList(), scheduledTimeout.operation.generateRecords().records());
}
302864.364107kafka
/**
 * Classic-group protocol test: a static leader forms a group, a second
 * (dynamic) member triggers a rebalance that completes only after the join
 * delay, the second member later times out and rejoins, and heartbeats are
 * validated against the correct generation at every step.
 */
public void testRebalanceCompletesBeforeMemberJoins() throws Exception {
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder().build();
    ClassicGroup group = context.createClassicGroup("group-id");
    // Static member ("leader-instance-id") joins and becomes leader of generation 1.
    JoinGroupRequestData joinRequest = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder().withGroupId("group-id").withGroupInstanceId("leader-instance-id").withMemberId(UNKNOWN_MEMBER_ID).withDefaultProtocolTypeAndProtocols().withRebalanceTimeoutMs(10000).withSessionTimeoutMs(5000).build();
    JoinGroupResponseData leaderJoinResponse = context.joinClassicGroupAndCompleteJoin(joinRequest, true, true);
    String firstMemberId = leaderJoinResponse.memberId();
    int firstGenerationId = leaderJoinResponse.generationId();
    assertEquals(1, firstGenerationId);
    assertTrue(group.isInState(COMPLETING_REBALANCE));
    // Leader syncs; completing the append future commits the assignment -> STABLE.
    SyncGroupRequestData syncRequest = new GroupMetadataManagerTestContext.SyncGroupRequestBuilder().withGroupId("group-id").withMemberId(firstMemberId).withGenerationId(firstGenerationId).build();
    GroupMetadataManagerTestContext.SyncResult syncResult = context.sendClassicGroupSync(syncRequest);
    syncResult.appendFuture.complete(null);
    assertTrue(syncResult.syncFuture.isDone());
    assertEquals(Errors.NONE.code(), syncResult.syncFuture.get().errorCode());
    assertTrue(group.isInState(STABLE));
    // A second, dynamic member (2.5s session timeout) joins, starting a new rebalance;
    // its join future stays pending until the rebalance completes.
    GroupMetadataManagerTestContext.JoinResult secondMemberJoinResult = context.sendClassicGroupJoin(joinRequest.setMemberId(UNKNOWN_MEMBER_ID).setGroupInstanceId(null).setSessionTimeoutMs(2500));
    assertTrue(secondMemberJoinResult.records.isEmpty());
    assertFalse(secondMemberJoinResult.joinFuture.isDone());
    assertTrue(group.isInState(PREPARING_REBALANCE));
    // While the rebalance is in progress, the first member's heartbeats (at the old
    // generation) get REBALANCE_IN_PROGRESS but keep its session alive.
    HeartbeatRequestData firstMemberHeartbeatRequest = new HeartbeatRequestData().setGroupId("group-id").setMemberId(firstMemberId).setGenerationId(firstGenerationId);
    for (int i = 0; i < 2; i++) {
        GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(2500));
        HeartbeatResponseData heartbeatResponse = context.sendClassicGroupHeartbeat(firstMemberHeartbeatRequest);
        assertEquals(Errors.REBALANCE_IN_PROGRESS.code(), heartbeatResponse.errorCode());
    }
    // After the remaining join delay elapses, the rebalance completes at generation 2.
    GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(8000));
    assertTrue(secondMemberJoinResult.joinFuture.isDone());
    assertEquals(Errors.NONE.code(), secondMemberJoinResult.joinFuture.get().errorCode());
    assertEquals(2, group.size());
    assertEquals(2, group.generationId());
    assertTrue(group.isInState(COMPLETING_REBALANCE));
    String otherMemberId = secondMemberJoinResult.joinFuture.get().memberId();
    syncResult = context.sendClassicGroupSync(syncRequest.setGroupInstanceId(null).setMemberId(otherMemberId).setGenerationId(2));
    syncResult.appendFuture.complete(null);
    assertTrue(syncResult.syncFuture.isDone());
    assertEquals(Errors.NONE.code(), syncResult.syncFuture.get().errorCode());
    assertTrue(group.isInState(STABLE));
    // Heartbeating with the stale generation 1 id must now fail.
    assertThrows(IllegalGenerationException.class, () -> context.sendClassicGroupHeartbeat(firstMemberHeartbeatRequest));
    // Heartbeat only as the second member; the first member's 5s session eventually
    // expires (between the 2nd and 3rd heartbeat), triggering another rebalance.
    List<Errors> expectedErrors = Arrays.asList(Errors.NONE, Errors.NONE, Errors.REBALANCE_IN_PROGRESS);
    for (Errors expectedError : expectedErrors) {
        GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(2000));
        HeartbeatResponseData heartbeatResponse = context.sendClassicGroupHeartbeat(firstMemberHeartbeatRequest.setMemberId(otherMemberId).setGenerationId(2));
        assertEquals(expectedError.code(), heartbeatResponse.errorCode());
    }
    assertEquals(1, group.size());
    assertTrue(group.isInState(PREPARING_REBALANCE));
    // The surviving member rejoins immediately, producing generation 3.
    GroupMetadataManagerTestContext.JoinResult otherMemberRejoinResult = context.sendClassicGroupJoin(joinRequest.setMemberId(otherMemberId).setGroupInstanceId(null).setSessionTimeoutMs(2500));
    assertTrue(otherMemberRejoinResult.records.isEmpty());
    assertTrue(otherMemberRejoinResult.joinFuture.isDone());
    assertEquals(Errors.NONE.code(), otherMemberRejoinResult.joinFuture.get().errorCode());
    assertEquals(3, otherMemberRejoinResult.joinFuture.get().generationId());
    assertTrue(group.isInState(COMPLETING_REBALANCE));
    GroupMetadataManagerTestContext.SyncResult otherMemberResyncResult = context.sendClassicGroupSync(syncRequest.setGroupInstanceId(null).setMemberId(otherMemberId).setGenerationId(3));
    otherMemberResyncResult.appendFuture.complete(null);
    assertTrue(otherMemberResyncResult.syncFuture.isDone());
    assertEquals(Errors.NONE.code(), otherMemberResyncResult.syncFuture.get().errorCode());
    assertTrue(group.isInState(STABLE));
    // Regular heartbeats at generation 3 keep the member alive indefinitely.
    for (int i = 0; i < 20; i++) {
        GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(2000));
        HeartbeatResponseData heartbeatResponse = context.sendClassicGroupHeartbeat(firstMemberHeartbeatRequest.setMemberId(otherMemberId).setGenerationId(3));
        assertEquals(Errors.NONE.code(), heartbeatResponse.errorCode());
    }
}
302596.389103kafka
/**
 * Verifies that exactly-once semantics hold when one task stalls while the two
 * stream threads run in isolated application instances (separate state dirs
 * "appDir1"/"appDir2"), forcing two rebalances.
 *
 * Phases: (1) write data and wait for both tasks to commit, then write data that
 * remains uncommitted; (2) inject a stall so one instance stops progressing and
 * its work rebalances onto the other instance; (3) release the stall, wait for
 * the work to rebalance back, write more data, and verify the committed output
 * contains exactly all input records — no loss, no duplicates.
 *
 * NOTE(review): the stall presumably leads to the stalled task's producer being
 * fenced (per the method name) — the fencing mechanics live in the stall
 * injection logic elsewhere in this class; confirm there.
 */
public void shouldNotViolateEosIfOneTaskGetsFencedUsingIsolatedAppInstances() throws Exception {
    // Only meaningful under an EOS config; skip entirely for at-least-once.
    if (eosConfig.equals(StreamsConfig.AT_LEAST_ONCE))
        return;
    // Two isolated instances sharing the same application but different state directories.
    try (final KafkaStreams streams1 = getKafkaStreams("streams1", false, "appDir1", 1, eosConfig);
        final KafkaStreams streams2 = getKafkaStreams("streams2", false, "appDir2", 1, eosConfig)) {
        startApplicationAndWaitUntilRunning(streams1);
        startApplicationAndWaitUntilRunning(streams2);
        final List<KeyValue<Long, Long>> committedDataBeforeStall = prepareData(0L, 10L, 0L, 1L);
        final List<KeyValue<Long, Long>> uncommittedDataBeforeStall = prepareData(10L, 15L, 0L, 1L);
        final List<KeyValue<Long, Long>> dataBeforeStall = new ArrayList<>(committedDataBeforeStall.size() + uncommittedDataBeforeStall.size());
        dataBeforeStall.addAll(committedDataBeforeStall);
        dataBeforeStall.addAll(uncommittedDataBeforeStall);
        final List<KeyValue<Long, Long>> dataToTriggerFirstRebalance = prepareData(15L, 20L, 0L, 1L);
        final List<KeyValue<Long, Long>> dataAfterSecondRebalance = prepareData(20L, 30L, 0L, 1L);
        // Phase 1: both tasks must commit before we proceed (one commit request per task).
        writeInputData(committedDataBeforeStall);
        waitForCondition(() -> commitRequested.get() == 2, MAX_WAIT_TIME_MS, "StreamsTasks did not request commit.");
        // A read-committed consumer must see only the committed prefix ...
        final List<KeyValue<Long, Long>> committedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, committedDataBeforeStall.size(), CONSUMER_GROUP_ID);
        checkResultPerKey(committedRecords, committedDataBeforeStall, "The committed records before stall do not match what expected");
        writeInputData(uncommittedDataBeforeStall);
        // ... while a read-uncommitted consumer (null group id) sees everything written so far.
        final List<KeyValue<Long, Long>> uncommittedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, dataBeforeStall.size(), null);
        checkResultPerKey(uncommittedRecords, dataBeforeStall, "The uncommitted records before stall do not match what expected");
        // Phase 2: arm the stall; the next record processed by the chosen host will block it.
        LOG.info("Injecting Stall");
        stallInjected.set(true);
        writeInputData(dataToTriggerFirstRebalance);
        LOG.info("Input Data Written");
        // Whichever host reports itself via stallingHost is the stalled one.
        waitForCondition(() -> stallingHost.get() != null, MAX_WAIT_TIME_MS, "Expected a host to start stalling");
        final String observedStallingHost = stallingHost.get();
        final KafkaStreams stallingInstance;
        final KafkaStreams remainingInstance;
        if ("streams1".equals(observedStallingHost)) {
            stallingInstance = streams1;
            remainingInstance = streams2;
        } else if ("streams2".equals(observedStallingHost)) {
            stallingInstance = streams2;
            remainingInstance = streams1;
        } else {
            throw new IllegalArgumentException("unexpected host name: " + observedStallingHost);
        }
        // After the first rebalance the healthy instance owns both partitions.
        waitForCondition(() -> stallingInstance.metadataForAllStreamsClients().size() == 2 && remainingInstance.metadataForAllStreamsClients().size() == 1 && remainingInstance.metadataForAllStreamsClients().iterator().next().topicPartitions().size() == 2, MAX_WAIT_TIME_MS, () -> "Should have rebalanced.\n" + "Streams1[" + streams1.metadataForAllStreamsClients() + "]\n" + "Streams2[" + streams2.metadataForAllStreamsClients() + "]");
        // The previously uncommitted data plus the trigger data must now be committed exactly once.
        final List<KeyValue<Long, Long>> committedRecordsAfterRebalance = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size(), CONSUMER_GROUP_ID);
        final List<KeyValue<Long, Long>> expectedCommittedRecordsAfterRebalance = new ArrayList<>(uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size());
        expectedCommittedRecordsAfterRebalance.addAll(uncommittedDataBeforeStall);
        expectedCommittedRecordsAfterRebalance.addAll(dataToTriggerFirstRebalance);
        checkResultPerKey(committedRecordsAfterRebalance, expectedCommittedRecordsAfterRebalance, "The all committed records after rebalance do not match what expected");
        // Phase 3: unblock the stalled instance and wait for work to spread back over both.
        LOG.info("Releasing Stall");
        doStall = false;
        waitForCondition(() -> streams1.metadataForAllStreamsClients().size() == 2 && streams2.metadataForAllStreamsClients().size() == 2 && streams1.metadataForAllStreamsClients().stream().mapToLong(meta -> meta.topicPartitions().size()).sum() == 2 && streams2.metadataForAllStreamsClients().stream().mapToLong(meta -> meta.topicPartitions().size()).sum() == 2, MAX_WAIT_TIME_MS, () -> "Should have rebalanced.\n" + "Streams1[" + streams1.metadataForAllStreamsClients() + "]\n" + "Streams2[" + streams2.metadataForAllStreamsClients() + "]");
        writeInputData(dataAfterSecondRebalance);
        // Final check: a fresh consumer group reads the full committed history exactly once.
        final List<KeyValue<Long, Long>> allCommittedRecords = readResult(SINGLE_PARTITION_OUTPUT_TOPIC, committedDataBeforeStall.size() + uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size() + dataAfterSecondRebalance.size(), CONSUMER_GROUP_ID + "_ALL");
        final int allCommittedRecordsAfterRecoverySize = committedDataBeforeStall.size() + uncommittedDataBeforeStall.size() + dataToTriggerFirstRebalance.size() + dataAfterSecondRebalance.size();
        final List<KeyValue<Long, Long>> allExpectedCommittedRecordsAfterRecovery = new ArrayList<>(allCommittedRecordsAfterRecoverySize);
        allExpectedCommittedRecordsAfterRecovery.addAll(committedDataBeforeStall);
        allExpectedCommittedRecordsAfterRecovery.addAll(uncommittedDataBeforeStall);
        allExpectedCommittedRecordsAfterRecovery.addAll(dataToTriggerFirstRebalance);
        allExpectedCommittedRecordsAfterRecovery.addAll(dataAfterSecondRebalance);
        checkResultPerKey(allCommittedRecords, allExpectedCommittedRecordsAfterRecovery, "The all committed records after recovery do not match what expected");
    }
}
303099.085108kafka
/**
 * Verifies that a stream-stream join feeding a sliding-window aggregation
 * restores its state correctly across an application restart.
 *
 * Two input streams are joined on key (2ms join window), then aggregated into
 * 10ms sliding windows. Expected output differs by emit strategy: emit-final
 * produces only closed windows (5 records), emit-on-update produces one record
 * per update (7 records). After closing the app, cleaning the local state
 * ({@code cleanUp()} forces restoration from the changelog), and producing one
 * more joined pair at t=35, the restarted app must emit results consistent with
 * the restored window state.
 */
public void shouldRestoreAfterJoinRestart() throws Exception {
    // Matching keys/timestamps on both inputs so every left record joins a right record.
    produceMessages(streamOneInput, new KeyValueTimestamp<>("A", "L1", 0), new KeyValueTimestamp<>("A", "L2", 5), new KeyValueTimestamp<>("B", "L3", 11), new KeyValueTimestamp<>("B", "L4", 15), new KeyValueTimestamp<>("C", "L5", 25));
    produceMessages(streamTwoInput, new KeyValueTimestamp<>("A", "R1", 0), new KeyValueTimestamp<>("A", "R2", 5), new KeyValueTimestamp<>("B", "R3", 11), new KeyValueTimestamp<>("B", "R4", 15), new KeyValueTimestamp<>("C", "R5", 25));
    final Serde<Windowed<String>> windowedSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10L);
    final KStream<String, String> streamOne = builder.stream(streamOneInput, Consumed.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> streamTwo = builder.stream(streamTwoInput, Consumed.with(Serdes.String(), Serdes.String()));
    // Join values as "left,right"; 2ms difference with no grace.
    final KStream<String, String> joinedStream = streamOne.join(streamTwo, (v1, v2) -> v1 + "," + v2, JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(2)));
    joinedStream.groupByKey().windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(10L))).emitStrategy(emitStrategy).aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, getMaterialized()).toStream().to(outputTopic, Produced.with(windowedSerde, new StringSerde()));
    startStreams();
    // emit-final: only windows closed by stream time advancing; emit-on-update: every update.
    List<KeyValueTimestamp<Windowed<String>, String>> windowedMessages = receiveMessagesWithTimestamp(new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), new StringDeserializer(), 10L, String.class, emitFinal ? 5 : 7);
    List<KeyValueTimestamp<Windowed<String>, String>> expectResult;
    if (emitFinal) {
        expectResult = asList(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+L1,R1+L2,R2", 5), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(1L, 11L)), "0+L2,R2", 5), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(1L, 11L)), "0+L3,R3", 11), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+L3,R3+L4,R4", 15), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(12L, 22L)), "0+L4,R4", 15));
    } else {
        expectResult = asList(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+L1,R1", 0), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(1L, 11L)), "0+L2,R2", 5), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+L1,R1+L2,R2", 5), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(1L, 11L)), "0+L3,R3", 11), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(12L, 22L)), "0+L4,R4", 15), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+L3,R3+L4,R4", 15), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(15L, 25L)), "0+L5,R5", 25));
    }
    assertThat(windowedMessages, is(expectResult));
    // Stop and wipe local state so the restart must restore from changelog topics.
    kafkaStreams.close();
    kafkaStreams.cleanUp();
    produceMessages(streamOneInput, new KeyValueTimestamp<>("C", "L6", 35));
    produceMessages(streamTwoInput, new KeyValueTimestamp<>("C", "R6", 35));
    startStreams();
    windowedMessages = receiveMessagesWithTimestamp(new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), new StringDeserializer(), 10L, String.class, emitFinal ? 1 : 2);
    if (emitFinal) {
        // t=35 closes the C window ending at 25 that was still open before the restart.
        expectResult = Collections.singletonList(new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(15L, 25L)), "0+L5,R5", 25));
    } else {
        // The restored C@25 record must combine with the new C@35 record in window [25,35].
        expectResult = asList(new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(26L, 36L)), "0+L6,R6", 35), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(25L, 35L)), "0+L5,R5+L6,R6", 35));
    }
    assertThat(windowedMessages, is(expectResult));
}
303242.554108kafka
/**
 * Verifies that a stream-stream join feeding a hopping time-window aggregation
 * (size 10ms, advance 5ms) restores its state correctly across a restart.
 *
 * Both inputs carry matching keys/timestamps so every left record joins a right
 * record. Expected output differs by emit strategy: emit-final yields only
 * closed windows (4 records), emit-on-update yields one record per update
 * (9 records — each input hits two overlapping hopping windows). After closing
 * the app and wiping local state ({@code cleanUp()} forces changelog
 * restoration), one more joined pair at t=35 must produce results consistent
 * with the restored window state.
 */
public void shouldRestoreAfterJoinRestart() throws Exception {
    produceMessages(streamOneInput, new KeyValueTimestamp<>("A", "L1", 0), new KeyValueTimestamp<>("A", "L1", 5), new KeyValueTimestamp<>("B", "L2", 11), new KeyValueTimestamp<>("B", "L2", 15), new KeyValueTimestamp<>("C", "L3", 25));
    produceMessages(streamTwoInput, new KeyValueTimestamp<>("A", "R1", 0), new KeyValueTimestamp<>("A", "R1", 5), new KeyValueTimestamp<>("B", "R2", 11), new KeyValueTimestamp<>("B", "R2", 15), new KeyValueTimestamp<>("C", "R3", 25));
    final Serde<Windowed<String>> windowedSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10L);
    final KStream<String, String> streamOne = builder.stream(streamOneInput, Consumed.with(Serdes.String(), Serdes.String()));
    final KStream<String, String> streamTwo = builder.stream(streamTwoInput, Consumed.with(Serdes.String(), Serdes.String()));
    // Join values as "left,right"; 2ms difference with no grace.
    final KStream<String, String> joinedStream = streamOne.join(streamTwo, (v1, v2) -> v1 + "," + v2, JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(2)));
    // Hopping windows: size 10ms advancing by 5ms, so each record lands in two windows.
    joinedStream.groupByKey().windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(10L)).advanceBy(ofMillis(5L))).emitStrategy(emitStrategy).aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, getMaterialized()).toStream().to(outputTopic, Produced.with(windowedSerde, new StringSerde()));
    startStreams();
    List<KeyValueTimestamp<Windowed<String>, String>> windowedMessages = receiveMessagesWithTimestamp(new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), new StringDeserializer(), 10L, String.class, emitFinal ? 4 : 9);
    List<KeyValueTimestamp<Windowed<String>, String>> expectResult;
    if (emitFinal) {
        expectResult = asList(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+L1,R1+L1,R1", 5), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+L1,R1", 5), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+L2,R2", 11), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(15L, 25L)), "0+L2,R2", 15));
    } else {
        expectResult = asList(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+L1,R1", 0), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+L1,R1+L1,R1", 5), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+L1,R1", 5), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+L2,R2", 11), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+L2,R2", 11), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+L2,R2+L2,R2", 15), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(15L, 25L)), "0+L2,R2", 15), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(20L, 30L)), "0+L3,R3", 25), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(25L, 35L)), "0+L3,R3", 25));
    }
    assertThat(windowedMessages, is(expectResult));
    // Stop and wipe local state so the restart must restore from changelog topics.
    kafkaStreams.close();
    kafkaStreams.cleanUp();
    produceMessages(streamOneInput, new KeyValueTimestamp<>("C", "L3", 35));
    produceMessages(streamTwoInput, new KeyValueTimestamp<>("C", "R3", 35));
    startStreams();
    if (emitFinal) {
        // t=35 closes the [25,35) window that was still open before the restart.
        windowedMessages = receiveMessagesWithTimestamp(new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), new StringDeserializer(), 10L, String.class, 1);
        expectResult = Collections.singletonList(new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(25L, 35L)), "0+L3,R3", 25));
    } else {
        // The new C@35 record updates both hopping windows it falls into.
        windowedMessages = receiveMessagesWithTimestamp(new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), new StringDeserializer(), 10L, String.class, 2);
        expectResult = asList(new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(30L, 40L)), "0+L3,R3", 35), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(35L, 45L)), "0+L3,R3", 35));
    }
    assertThat(windowedMessages, is(expectResult));
}
304927.1199kafka
/**
 * Drives an emit-on-update windowed-aggregation join through a topology driver
 * and checks the captured output of all three processors after each input batch.
 *
 * The supplier captures three processors: index 0 receives the left stream's
 * windowed aggregation, index 1 the right stream's, and index 2 the join of the
 * two aggregates (left and right aggregate values concatenated with '%', as
 * seen in the expected results below). Under emit-on-update a processor emits
 * on every record; the join only fires once both sides of a window have values.
 *
 * @param inputTopic1 left input topic
 * @param inputTopic2 right input topic
 * @param supplier    captures the three downstream processors for inspection
 */
private void processEmitUpdateJoin(final TestInputTopic<String, String> inputTopic1, final TestInputTopic<String, String> inputTopic2, final MockApiProcessorSupplier<Windowed<String>, String, Void, Void> supplier) {
    // Batch 1: left side only; record A@9 updates window [0,10) and opens [5,15).
    inputTopic1.pipeInput("A", "1", 0L);
    inputTopic1.pipeInput("B", "2", 1L);
    inputTopic1.pipeInput("C", "3", 2L);
    inputTopic1.pipeInput("D", "4", 3L);
    inputTopic1.pipeInput("A", "1", 9L);
    final List<MockApiProcessor<Windowed<String>, String, Void, Void>> processors = supplier.capturedProcessors(3);
    processors.get(0).checkAndClearProcessResult(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1", 0), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2", 1), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3", 2), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4", 3), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1", 9), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1", 9));
    // Right side and join have seen nothing yet.
    processors.get(1).checkAndClearProcessResult();
    processors.get(2).checkAndClearProcessResult();
    // Batch 2: more left records in 5..9, each landing in both [0,10) and [5,15).
    inputTopic1.pipeInput("A", "1", 5L);
    inputTopic1.pipeInput("B", "2", 6L);
    inputTopic1.pipeInput("D", "4", 7L);
    inputTopic1.pipeInput("B", "2", 8L);
    inputTopic1.pipeInput("C", "3", 9L);
    processors.get(0).checkAndClearProcessResult(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1", 9), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1", 9), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2", 6), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2", 6), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4", 7), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4", 7), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2", 8), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2", 8), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3", 9), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3", 9));
    processors.get(1).checkAndClearProcessResult();
    processors.get(2).checkAndClearProcessResult();
    // Batch 3: first right-side records; now windows with both sides produce joins.
    inputTopic2.pipeInput("A", "a", 0L);
    inputTopic2.pipeInput("B", "b", 1L);
    inputTopic2.pipeInput("C", "c", 2L);
    inputTopic2.pipeInput("D", "d", 20L);
    inputTopic2.pipeInput("A", "a", 20L);
    processors.get(0).checkAndClearProcessResult();
    processors.get(1).checkAndClearProcessResult(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+a", 0), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+b", 1), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+c", 2), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(15, 25)), "0+d", 20), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(20, 30)), "0+d", 20), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(15, 25)), "0+a", 20), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(20, 30)), "0+a", 20));
    // Joins only for window [0,10), where both aggregates exist; values joined by '%'.
    processors.get(2).checkAndClearProcessResult(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1%0+a", 9), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2%0+b", 8), new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3%0+c", 9));
    // Batch 4: right-side updates that both refresh existing joins and add window [5,15) joins.
    inputTopic2.pipeInput("A", "a", 5L);
    inputTopic2.pipeInput("B", "b", 6L);
    inputTopic2.pipeInput("D", "d", 7L);
    inputTopic2.pipeInput("D", "d", 18L);
    inputTopic2.pipeInput("A", "a", 21L);
    processors.get(0).checkAndClearProcessResult();
    processors.get(1).checkAndClearProcessResult(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+a+a", 5), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+a", 5), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+b+b", 6), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+b", 6), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+d", 7), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+d", 7), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(10, 20)), "0+d", 18), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(15, 25)), "0+d+d", 20), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(15, 25)), "0+a+a", 21), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(20, 30)), "0+a+a", 21));
    processors.get(2).checkAndClearProcessResult(new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1%0+a+a", 9), new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1%0+a", 9), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2%0+b+b", 8), new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2%0+b", 8), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4%0+d", 7), new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4%0+d", 7));
}
303158.99997kafka
/**
 * Runs two rounds of StreamsPartitionAssignor assignment over a large synthetic
 * cluster and fails if either round exceeds {@code MAX_ASSIGNMENT_DURATION}.
 *
 * Round one assigns from fresh subscriptions with no prior task ownership;
 * round two re-subscribes each consumer echoing back the tasks it was just
 * assigned, exercising the sticky-assignment path. The timed-round logic is
 * factored into {@link #assignWithinDurationLimit}.
 *
 * @param numPartitions       partitions of the single input topic (and changelog)
 * @param numClients          number of distinct client processes to simulate
 * @param numThreadsPerClient consumers (stream threads) per client
 * @param numStandbys         value for {@code num.standby.replicas}
 * @param taskAssignor        assignor implementation under test
 */
private void completeLargeAssignment(final int numPartitions, final int numClients, final int numThreadsPerClient, final int numStandbys, final Class<? extends TaskAssignor> taskAssignor) {
    final List<String> topic = singletonList("topic");
    // Pretend every changelog partition already has 100k records so tasks carry state.
    final Map<TopicPartition, Long> changelogEndOffsets = new HashMap<>();
    for (int p = 0; p < numPartitions; ++p) {
        changelogEndOffsets.put(new TopicPartition(APPLICATION_ID + "-store-changelog", p), 100_000L);
    }
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    for (int p = 0; p < numPartitions; ++p) {
        partitionInfos.add(new PartitionInfo("topic", p, Node.noNode(), new Node[0], new Node[0]));
    }
    final Cluster clusterMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), partitionInfos, emptySet(), emptySet());
    final Map<String, Object> configMap = new HashMap<>();
    configMap.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID);
    configMap.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:8080");
    // Minimal stateful topology: source -> processor -> store, so tasks have a changelog.
    final InternalTopologyBuilder builder = new InternalTopologyBuilder();
    builder.addSource(null, "source", null, null, null, "topic");
    builder.addProcessor("processor", new MockApiProcessorSupplier<>(), "source");
    builder.addStateStore(new MockKeyValueStoreBuilder("store", false), "processor");
    final TopologyMetadata topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configMap));
    topologyMetadata.buildAndRewriteTopology();
    @SuppressWarnings("unchecked")
    final Consumer<byte[], byte[]> mainConsumer = mock(Consumer.class);
    final TaskManager taskManager = mock(TaskManager.class);
    when(taskManager.topologyMetadata()).thenReturn(topologyMetadata);
    when(mainConsumer.committed(anySet())).thenReturn(Collections.emptyMap());
    final AdminClient adminClient = createMockAdminClientForAssignor(changelogEndOffsets);
    // The reference container is how the assignor reaches its collaborators.
    final ReferenceContainer referenceContainer = new ReferenceContainer();
    referenceContainer.mainConsumer = mainConsumer;
    referenceContainer.adminClient = adminClient;
    referenceContainer.taskManager = taskManager;
    referenceContainer.streamsMetadataState = mock(StreamsMetadataState.class);
    referenceContainer.time = new MockTime();
    configMap.put(InternalConfig.REFERENCE_CONTAINER_PARTITION_ASSIGNOR, referenceContainer);
    configMap.put(InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, taskAssignor.getName());
    configMap.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, numStandbys);
    final MockInternalTopicManager mockInternalTopicManager = new MockInternalTopicManager(new MockTime(), new StreamsConfig(configMap), new MockClientSupplier().restoreConsumer, false);
    final StreamsPartitionAssignor partitionAssignor = new StreamsPartitionAssignor();
    partitionAssignor.configure(configMap);
    partitionAssignor.setInternalTopicManager(mockInternalTopicManager);
    // Fresh subscriptions: every consumer reports empty previous active/standby tasks.
    final Map<String, Subscription> subscriptions = new HashMap<>();
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            subscriptions.put(getConsumerName(i, client), new Subscription(topic, getInfo(uuidForInt(client), EMPTY_TASKS, EMPTY_TASKS).encode()));
        }
    }
    final Map<String, Assignment> firstAssignments = assignWithinDurationLimit(partitionAssignor, clusterMetadata, subscriptions, "first");
    // Re-subscribe, echoing back the tasks each consumer was just assigned,
    // so the second round exercises the sticky/previous-owner code path.
    for (int client = 0; client < numClients; ++client) {
        for (int i = 0; i < numThreadsPerClient; ++i) {
            final String consumer = getConsumerName(i, client);
            final Assignment assignment = firstAssignments.get(consumer);
            final AssignmentInfo info = AssignmentInfo.decode(assignment.userData());
            subscriptions.put(consumer, new Subscription(topic, getInfo(uuidForInt(client), new HashSet<>(info.activeTasks()), info.standbyTasks().keySet()).encode(), assignment.partitions()));
        }
    }
    final Map<String, Assignment> secondAssignments = assignWithinDurationLimit(partitionAssignor, clusterMetadata, subscriptions, "second");
    assertThat(secondAssignments.size(), is(numClients * numThreadsPerClient));
}

/**
 * Runs a single assignment round and returns the per-consumer assignments,
 * throwing an AssertionError if the round exceeds {@code MAX_ASSIGNMENT_DURATION}.
 *
 * @param partitionAssignor the configured assignor under test
 * @param clusterMetadata   cluster metadata describing the input topic partitions
 * @param subscriptions     per-consumer subscriptions for this round
 * @param round             round label used in messages, e.g. "first" or "second"
 */
private Map<String, Assignment> assignWithinDurationLimit(final StreamsPartitionAssignor partitionAssignor, final Cluster clusterMetadata, final Map<String, Subscription> subscriptions, final String round) {
    final long startMs = System.currentTimeMillis();
    final Map<String, Assignment> assignments = partitionAssignor.assign(clusterMetadata, new GroupSubscription(subscriptions)).groupAssignment();
    final long durationMs = System.currentTimeMillis() - startMs;
    if (durationMs > MAX_ASSIGNMENT_DURATION) {
        throw new AssertionError("The " + round + " assignment took too long to complete at " + durationMs + "ms.");
    }
    // Capitalize the round label to preserve the original log wording
    // ("First assignment took ...", "Second assignment took ...").
    log.info("{} assignment took {}ms.", Character.toUpperCase(round.charAt(0)) + round.substring(1), durationMs);
    return assignments;
}
303343.671393spring-framework
/**
 * Exercises message resolution against a (Reloadable)ResourceBundleMessageSource
 * registered in a StaticApplicationContext, covering locale fallback, resolvables,
 * argument substitution, MessageFormat escaping, and code-as-default behavior.
 *
 * @param reloadable               use ReloadableResourceBundleMessageSource (with
 *                                 classpath: basenames and a parent context) instead
 *                                 of ResourceBundleMessageSource
 * @param fallbackToSystemLocale   leave the source's system-locale fallback enabled
 * @param expectGermanFallback     set the JVM default locale to GERMAN (else CANADA)
 *                                 so "code2" may fall back to the German bundle
 * @param useCodeAsDefaultMessage  expect the code itself back for unresolvable codes
 *                                 instead of NoSuchMessageException
 * @param alwaysUseMessageFormat   expect MessageFormat quoting applied even without args
 */
protected void doTestMessageAccess(boolean reloadable, boolean fallbackToSystemLocale, boolean expectGermanFallback, boolean useCodeAsDefaultMessage, boolean alwaysUseMessageFormat) {
    StaticApplicationContext ac = new StaticApplicationContext();
    if (reloadable) {
        // The reloadable source resolves via resource loading, so give it a refreshed parent.
        StaticApplicationContext parent = new StaticApplicationContext();
        parent.refresh();
        ac.setParent(parent);
    }
    MutablePropertyValues pvs = new MutablePropertyValues();
    String basepath = "org/springframework/context/support/";
    String[] basenames;
    if (reloadable) {
        // Reloadable source takes Spring resource locations; bundle source takes bundle basenames.
        basenames = new String[] { "classpath:" + basepath + "messages", "classpath:" + basepath + "more-messages" };
    } else {
        basenames = new String[] { basepath + "messages", basepath + "more-messages" };
    }
    pvs.add("basenames", basenames);
    if (!fallbackToSystemLocale) {
        pvs.add("fallbackToSystemLocale", Boolean.FALSE);
    }
    if (useCodeAsDefaultMessage) {
        pvs.add("useCodeAsDefaultMessage", Boolean.TRUE);
    }
    if (alwaysUseMessageFormat) {
        pvs.add("alwaysUseMessageFormat", Boolean.TRUE);
    }
    Class<?> clazz = reloadable ? ReloadableResourceBundleMessageSource.class : ResourceBundleMessageSource.class;
    ac.registerSingleton("messageSource", clazz, pvs);
    ac.refresh();
    // The JVM default locale drives the system-locale fallback checked below.
    Locale.setDefault(expectGermanFallback ? Locale.GERMAN : Locale.CANADA);
    assertThat(ac.getMessage("code1", null, Locale.ENGLISH)).isEqualTo("message1");
    // "code2" is absent from the English bundle: it resolves via the system-locale
    // fallback to the German value only when that fallback is both enabled and German.
    Object expected = (fallbackToSystemLocale && expectGermanFallback ? "nachricht2" : "message2");
    assertThat(ac.getMessage("code2", null, Locale.ENGLISH)).isEqualTo(expected);
    assertThat(ac.getMessage("code2", null, Locale.GERMAN)).isEqualTo("nachricht2");
    // Country and variant-specific bundles shadow the plain German one.
    assertThat(ac.getMessage("code2", null, new Locale("DE", "at"))).isEqualTo("nochricht2");
    assertThat(ac.getMessage("code2", null, new Locale("DE", "at", "oo"))).isEqualTo("noochricht2");
    if (reloadable) {
        // Only the reloadable source picks up the XML-properties variant for GERMANY.
        assertThat(ac.getMessage("code2", null, Locale.GERMANY)).isEqualTo("nachricht2xml");
    }
    // MessageSourceAccessor should pick up the thread-bound locale from LocaleContextHolder.
    MessageSourceAccessor accessor = new MessageSourceAccessor(ac);
    LocaleContextHolder.setLocale(new Locale("DE", "at"));
    try {
        assertThat(accessor.getMessage("code2")).isEqualTo("nochricht2");
    } finally {
        LocaleContextHolder.setLocale(null);
    }
    // Resolvables: first matching code in the array wins.
    assertThat(ac.getMessage("code3", null, Locale.ENGLISH)).isEqualTo("message3");
    MessageSourceResolvable resolvable = new DefaultMessageSourceResolvable("code3");
    assertThat(ac.getMessage(resolvable, Locale.ENGLISH)).isEqualTo("message3");
    resolvable = new DefaultMessageSourceResolvable(new String[] { "code4", "code3" });
    assertThat(ac.getMessage(resolvable, Locale.ENGLISH)).isEqualTo("message3");
    assertThat(ac.getMessage("code3", null, Locale.ENGLISH)).isEqualTo("message3");
    resolvable = new DefaultMessageSourceResolvable(new String[] { "code4", "code3" });
    assertThat(ac.getMessage(resolvable, Locale.ENGLISH)).isEqualTo("message3");
    // Arguments may themselves be resolvables, resolved before substitution.
    Object[] args = new Object[] { "Hello", new DefaultMessageSourceResolvable(new String[] { "code1" }) };
    assertThat(ac.getMessage("hello", args, Locale.ENGLISH)).isEqualTo("Hello, message1");
    // Null code: fall through to the default message (formatted only when args are given).
    assertThat(ac.getMessage(null, null, null, Locale.ENGLISH)).isNull();
    assertThat(ac.getMessage(null, null, "default", Locale.ENGLISH)).isEqualTo("default");
    assertThat(ac.getMessage(null, args, "default", Locale.ENGLISH)).isEqualTo("default");
    assertThat(ac.getMessage(null, null, "{0}, default", Locale.ENGLISH)).isEqualTo("{0}, default");
    assertThat(ac.getMessage(null, args, "{0}, default", Locale.ENGLISH)).isEqualTo("Hello, default");
    // Same default-message behavior when going through a resolvable.
    resolvable = new DefaultMessageSourceResolvable(null, null, "default");
    assertThat(ac.getMessage(resolvable, Locale.ENGLISH)).isEqualTo("default");
    resolvable = new DefaultMessageSourceResolvable(null, args, "default");
    assertThat(ac.getMessage(resolvable, Locale.ENGLISH)).isEqualTo("default");
    resolvable = new DefaultMessageSourceResolvable(null, null, "{0}, default");
    assertThat(ac.getMessage(resolvable, Locale.ENGLISH)).isEqualTo("{0}, default");
    resolvable = new DefaultMessageSourceResolvable(null, args, "{0}, default");
    assertThat(ac.getMessage(resolvable, Locale.ENGLISH)).isEqualTo("Hello, default");
    // Placeholders substituted with args, left literal without args.
    assertThat(ac.getMessage("hello", new Object[] { "Arg1", "Arg2" }, Locale.ENGLISH)).isEqualTo("Arg1, Arg2");
    assertThat(ac.getMessage("hello", null, Locale.ENGLISH)).isEqualTo("{0}, {1}");
    if (alwaysUseMessageFormat) {
        // MessageFormat strips the quote escaping even when no args are supplied.
        assertThat(ac.getMessage("escaped", null, Locale.ENGLISH)).isEqualTo("I'm");
    } else {
        assertThat(ac.getMessage("escaped", null, Locale.ENGLISH)).isEqualTo("I''m");
    }
    // With args, MessageFormat always applies and unescapes the quote.
    assertThat(ac.getMessage("escaped", new Object[] { "some arg" }, Locale.ENGLISH)).isEqualTo("I'm");
    if (useCodeAsDefaultMessage) {
        // Unresolvable code comes back verbatim instead of throwing.
        assertThat(ac.getMessage("code4", null, Locale.GERMAN)).isEqualTo("code4");
    } else {
        assertThatExceptionOfType(NoSuchMessageException.class).isThrownBy(() -> ac.getMessage("code4", null, Locale.GERMAN));
    }
}
305544.41196spring-framework
// Asserts the full AnnotationMetadata contract for AnnotatedComponent: class-level info,
// direct and meta-annotation presence, annotated methods, and nested annotation attributes.
// The first scoped block reads attributes with Class values exposed as Class objects; the
// second re-reads them with the boolean flag set to true, where (as the assertions show)
// Class-typed attribute values are exposed as fully qualified class-name Strings instead.
private void doTestAnnotationInfo(AnnotationMetadata metadata) {
    // Basic class-level characteristics of AnnotatedComponent.
    assertThat(metadata.getClassName()).isEqualTo(AnnotatedComponent.class.getName());
    assertThat(metadata.isInterface()).isFalse();
    assertThat(metadata.isAnnotation()).isFalse();
    assertThat(metadata.isAbstract()).isFalse();
    assertThat(metadata.isConcrete()).isTrue();
    assertThat(metadata.hasSuperClass()).isTrue();
    assertThat(metadata.getSuperClassName()).isEqualTo(Object.class.getName());
    assertThat(metadata.getInterfaceNames()).hasSize(1);
    assertThat(metadata.getInterfaceNames()[0]).isEqualTo(Serializable.class.getName());
    // Annotation presence: isAnnotated() covers meta-annotations, hasAnnotation() direct ones.
    assertThat(metadata.isAnnotated(Component.class.getName())).isTrue();
    assertThat(metadata.isAnnotated(NamedComposedAnnotation.class.getName())).isTrue();
    assertThat(metadata.hasAnnotation(Component.class.getName())).isTrue();
    assertThat(metadata.hasAnnotation(Scope.class.getName())).isTrue();
    assertThat(metadata.hasAnnotation(SpecialAttr.class.getName())).isTrue();
    assertThat(metadata.hasAnnotation(NamedComposedAnnotation.class.getName())).isTrue();
    assertThat(metadata.getAnnotationTypes()).containsExactlyInAnyOrder(Component.class.getName(), Scope.class.getName(), SpecialAttr.class.getName(), DirectAnnotation.class.getName(), MetaMetaAnnotation.class.getName(), EnumSubclasses.class.getName(), NamedComposedAnnotation.class.getName());
    // Simple single-attribute annotations: @Component and @Scope.
    AnnotationAttributes compAttrs = (AnnotationAttributes) metadata.getAnnotationAttributes(Component.class.getName());
    assertThat(compAttrs).hasSize(1);
    assertThat(compAttrs.getString("value")).isEqualTo("myName");
    AnnotationAttributes scopeAttrs = (AnnotationAttributes) metadata.getAnnotationAttributes(Scope.class.getName());
    assertThat(scopeAttrs).hasSize(1);
    assertThat(scopeAttrs.getString("value")).isEqualTo("myScope");
    // Method-level attributes: getAnnotationAttributes() yields the direct value, while
    // getAllAnnotationAttributes() merges the direct and meta-annotation values.
    Set<MethodMetadata> methods = metadata.getAnnotatedMethods(DirectAnnotation.class.getName());
    MethodMetadata method = methods.iterator().next();
    assertThat(method.getAnnotationAttributes(DirectAnnotation.class.getName()).get("value")).isEqualTo("direct");
    assertThat(method.getAnnotationAttributes(DirectAnnotation.class.getName()).get("myValue")).isEqualTo("direct");
    List<Object> allMeta = method.getAllAnnotationAttributes(DirectAnnotation.class.getName()).get("value");
    assertThat(new HashSet<>(allMeta)).isEqualTo(new HashSet<>(Arrays.asList("direct", "meta")));
    allMeta = method.getAllAnnotationAttributes(DirectAnnotation.class.getName()).get("additional");
    assertThat(new HashSet<>(allMeta)).isEqualTo(new HashSet<>(List.of("direct")));
    assertThat(metadata.isAnnotated(IsAnnotatedAnnotation.class.getName())).isTrue();
    // SpecialAttr attributes with Class values exposed as Class objects.
    {
        AnnotationAttributes specialAttrs = (AnnotationAttributes) metadata.getAnnotationAttributes(SpecialAttr.class.getName());
        assertThat(specialAttrs).hasSize(6);
        assertThat(String.class.isAssignableFrom(specialAttrs.getClass("clazz"))).isTrue();
        assertThat(specialAttrs.getEnum("state").equals(Thread.State.NEW)).isTrue();
        // Single nested annotation attribute.
        AnnotationAttributes nestedAnno = specialAttrs.getAnnotation("nestedAnno");
        assertThat(nestedAnno.getString("value")).isEqualTo("na");
        assertThat(nestedAnno.getEnum("anEnum").equals(SomeEnum.LABEL1)).isTrue();
        assertThat((Class<?>[]) nestedAnno.get("classArray")).isEqualTo(new Class<?>[] { String.class });
        // Array of nested annotations: element 0 carries the defaults, element 1 explicit values.
        AnnotationAttributes[] nestedAnnoArray = specialAttrs.getAnnotationArray("nestedAnnoArray");
        assertThat(nestedAnnoArray).hasSize(2);
        assertThat(nestedAnnoArray[0].getString("value")).isEqualTo("default");
        assertThat(nestedAnnoArray[0].getEnum("anEnum").equals(SomeEnum.DEFAULT)).isTrue();
        assertThat((Class<?>[]) nestedAnnoArray[0].get("classArray")).isEqualTo(new Class<?>[] { Void.class });
        assertThat(nestedAnnoArray[1].getString("value")).isEqualTo("na1");
        assertThat(nestedAnnoArray[1].getEnum("anEnum").equals(SomeEnum.LABEL2)).isTrue();
        assertThat((Class<?>[]) nestedAnnoArray[1].get("classArray")).isEqualTo(new Class<?>[] { Number.class });
        assertThat(nestedAnnoArray[1].getClassArray("classArray")).isEqualTo(new Class<?>[] { Number.class });
        // Optional nested annotation and its single-element array counterpart.
        AnnotationAttributes optional = specialAttrs.getAnnotation("optional");
        assertThat(optional.getString("value")).isEqualTo("optional");
        assertThat(optional.getEnum("anEnum").equals(SomeEnum.DEFAULT)).isTrue();
        assertThat((Class<?>[]) optional.get("classArray")).isEqualTo(new Class<?>[] { Void.class });
        assertThat(optional.getClassArray("classArray")).isEqualTo(new Class<?>[] { Void.class });
        AnnotationAttributes[] optionalArray = specialAttrs.getAnnotationArray("optionalArray");
        assertThat(optionalArray).hasSize(1);
        assertThat(optionalArray[0].getString("value")).isEqualTo("optional");
        assertThat(optionalArray[0].getEnum("anEnum").equals(SomeEnum.DEFAULT)).isTrue();
        assertThat((Class<?>[]) optionalArray[0].get("classArray")).isEqualTo(new Class<?>[] { Void.class });
        assertThat(optionalArray[0].getClassArray("classArray")).isEqualTo(new Class<?>[] { Void.class });
        // Class-level direct vs. merged meta attribute values, including empty defaults.
        assertThat(metadata.getAnnotationAttributes(DirectAnnotation.class.getName()).get("value")).isEqualTo("direct");
        allMeta = metadata.getAllAnnotationAttributes(DirectAnnotation.class.getName()).get("value");
        assertThat(new HashSet<>(allMeta)).isEqualTo(new HashSet<>(Arrays.asList("direct", "meta")));
        allMeta = metadata.getAllAnnotationAttributes(DirectAnnotation.class.getName()).get("additional");
        assertThat(new HashSet<>(allMeta)).isEqualTo(new HashSet<>(Arrays.asList("direct", "")));
        assertThat(metadata.getAnnotationAttributes(DirectAnnotation.class.getName()).get("additional")).isEqualTo("");
        assertThat(((String[]) metadata.getAnnotationAttributes(DirectAnnotation.class.getName()).get("additionalArray"))).isEmpty();
    }
    // Same SpecialAttr attributes re-read with the flag set to true: as asserted below,
    // Class-typed values (and Class arrays) come back as fully qualified name Strings.
    {
        AnnotationAttributes specialAttrs = (AnnotationAttributes) metadata.getAnnotationAttributes(SpecialAttr.class.getName(), true);
        assertThat(specialAttrs).hasSize(6);
        assertThat(specialAttrs.get("clazz")).isEqualTo(String.class.getName());
        assertThat(specialAttrs.getString("clazz")).isEqualTo(String.class.getName());
        AnnotationAttributes nestedAnno = specialAttrs.getAnnotation("nestedAnno");
        assertThat(nestedAnno.getStringArray("classArray")).isEqualTo(new String[] { String.class.getName() });
        assertThat(nestedAnno.getStringArray("classArray")).isEqualTo(new String[] { String.class.getName() });
        AnnotationAttributes[] nestedAnnoArray = specialAttrs.getAnnotationArray("nestedAnnoArray");
        assertThat((String[]) nestedAnnoArray[0].get("classArray")).isEqualTo(new String[] { Void.class.getName() });
        assertThat(nestedAnnoArray[0].getStringArray("classArray")).isEqualTo(new String[] { Void.class.getName() });
        assertThat((String[]) nestedAnnoArray[1].get("classArray")).isEqualTo(new String[] { Number.class.getName() });
        assertThat(nestedAnnoArray[1].getStringArray("classArray")).isEqualTo(new String[] { Number.class.getName() });
        AnnotationAttributes optional = specialAttrs.getAnnotation("optional");
        assertThat((String[]) optional.get("classArray")).isEqualTo(new String[] { Void.class.getName() });
        assertThat(optional.getStringArray("classArray")).isEqualTo(new String[] { Void.class.getName() });
        AnnotationAttributes[] optionalArray = specialAttrs.getAnnotationArray("optionalArray");
        assertThat((String[]) optionalArray[0].get("classArray")).isEqualTo(new String[] { Void.class.getName() });
        assertThat(optionalArray[0].getStringArray("classArray")).isEqualTo(new String[] { Void.class.getName() });
        // The direct/meta merge behaviour is unaffected by the flag.
        assertThat(metadata.getAnnotationAttributes(DirectAnnotation.class.getName()).get("value")).isEqualTo("direct");
        allMeta = metadata.getAllAnnotationAttributes(DirectAnnotation.class.getName()).get("value");
        assertThat(new HashSet<>(allMeta)).isEqualTo(new HashSet<>(Arrays.asList("direct", "meta")));
    }
}
304460.561101spring-framework
 // Parses websocket-config-broker-simple.xml and verifies the full object graph it builds:
 // the URL handler mapping, the WebSocket handler decorator chain, the STOMP sub-protocol
 // handler, the SockJS service, user registry/destination resolution, the simple broker,
 // and the client/broker message channels.
 void simpleBroker() throws Exception {
    loadBeanDefinitions("websocket-config-broker-simple.xml");
    // A SimpleUrlHandlerMapping with four mapped URLs is expected.
    HandlerMapping hm = this.appContext.getBean(HandlerMapping.class);
    assertThat(hm).isInstanceOf(SimpleUrlHandlerMapping.class);
    SimpleUrlHandlerMapping suhm = (SimpleUrlHandlerMapping) hm;
    assertThat(suhm.getUrlMap()).hasSize(4);
    // "/foo" maps to a plain WebSocket endpoint with the custom handshake handler
    // and the three configured handshake interceptors (in declaration order).
    HttpRequestHandler httpRequestHandler = (HttpRequestHandler) suhm.getUrlMap().get("/foo");
    assertThat(httpRequestHandler).isNotNull();
    assertThat(httpRequestHandler).isInstanceOf(WebSocketHttpRequestHandler.class);
    WebSocketHttpRequestHandler wsHttpRequestHandler = (WebSocketHttpRequestHandler) httpRequestHandler;
    HandshakeHandler handshakeHandler = wsHttpRequestHandler.getHandshakeHandler();
    assertThat(handshakeHandler).isNotNull();
    assertThat(handshakeHandler).isInstanceOf(TestHandshakeHandler.class);
    List<HandshakeInterceptor> interceptors = wsHttpRequestHandler.getHandshakeInterceptors();
    assertThat(interceptors).extracting("class").containsExactly(FooTestInterceptor.class, BarTestInterceptor.class, OriginHandshakeInterceptor.class);
    // The configured decorator marks sessions with a "decorated" attribute on connect.
    WebSocketSession session = new TestWebSocketSession("id");
    wsHttpRequestHandler.getWebSocketHandler().afterConnectionEstablished(session);
    assertThat(session.getAttributes().get("decorated")).asInstanceOf(BOOLEAN).isTrue();
    // Unwrap the decorator chain layer by layer:
    // Exception -> Logging -> Test decorator -> SubProtocolWebSocketHandler.
    WebSocketHandler wsHandler = wsHttpRequestHandler.getWebSocketHandler();
    assertThat(wsHandler).isInstanceOf(ExceptionWebSocketHandlerDecorator.class);
    wsHandler = ((ExceptionWebSocketHandlerDecorator) wsHandler).getDelegate();
    assertThat(wsHandler).isInstanceOf(LoggingWebSocketHandlerDecorator.class);
    wsHandler = ((LoggingWebSocketHandlerDecorator) wsHandler).getDelegate();
    assertThat(wsHandler).isInstanceOf(TestWebSocketHandlerDecorator.class);
    wsHandler = ((TestWebSocketHandlerDecorator) wsHandler).getDelegate();
    assertThat(wsHandler).isInstanceOf(SubProtocolWebSocketHandler.class);
    assertThat(this.appContext.getBean(MessageBrokerBeanDefinitionParser.WEB_SOCKET_HANDLER_BEAN_NAME)).isSameAs(wsHandler);
    // STOMP sub-protocol configuration: supported versions, limits, and error handler.
    SubProtocolWebSocketHandler subProtocolWsHandler = (SubProtocolWebSocketHandler) wsHandler;
    assertThat(subProtocolWsHandler.getSubProtocols()).isEqualTo(Arrays.asList("v10.stomp", "v11.stomp", "v12.stomp"));
    assertThat(subProtocolWsHandler.getSendTimeLimit()).isEqualTo(25 * 1000);
    assertThat(subProtocolWsHandler.getSendBufferSizeLimit()).isEqualTo(1024 * 1024);
    assertThat(subProtocolWsHandler.getTimeToFirstMessage()).isEqualTo(30 * 1000);
    Map<String, SubProtocolHandler> handlerMap = subProtocolWsHandler.getProtocolHandlerMap();
    StompSubProtocolHandler stompHandler = (StompSubProtocolHandler) handlerMap.get("v12.stomp");
    assertThat(stompHandler).isNotNull();
    assertThat(stompHandler.getMessageSizeLimit()).isEqualTo(128 * 1024);
    assertThat(stompHandler.getErrorHandler()).isNotNull();
    assertThat(stompHandler.getErrorHandler().getClass()).isEqualTo(TestStompErrorHandler.class);
    assertThat(new DirectFieldAccessor(stompHandler).getPropertyValue("eventPublisher")).isNotNull();
    // "/test/**" maps to a SockJS endpoint backed by the same SubProtocolWebSocketHandler.
    httpRequestHandler = (HttpRequestHandler) suhm.getUrlMap().get("/test/**");
    assertThat(httpRequestHandler).isNotNull();
    assertThat(httpRequestHandler).isInstanceOf(SockJsHttpRequestHandler.class);
    SockJsHttpRequestHandler sockJsHttpRequestHandler = (SockJsHttpRequestHandler) httpRequestHandler;
    wsHandler = unwrapWebSocketHandler(sockJsHttpRequestHandler.getWebSocketHandler());
    assertThat(wsHandler).isNotNull();
    assertThat(wsHandler).isInstanceOf(SubProtocolWebSocketHandler.class);
    assertThat(sockJsHttpRequestHandler.getSockJsService()).isNotNull();
    assertThat(sockJsHttpRequestHandler.getSockJsService()).isInstanceOf(DefaultSockJsService.class);
    // SockJS service details: websocket transport, CORS, scheduler, interceptors, origins.
    DefaultSockJsService defaultSockJsService = (DefaultSockJsService) sockJsHttpRequestHandler.getSockJsService();
    WebSocketTransportHandler wsTransportHandler = (WebSocketTransportHandler) defaultSockJsService.getTransportHandlers().get(TransportType.WEBSOCKET);
    assertThat(wsTransportHandler.getHandshakeHandler()).isNotNull();
    assertThat(wsTransportHandler.getHandshakeHandler()).isInstanceOf(TestHandshakeHandler.class);
    assertThat(defaultSockJsService.shouldSuppressCors()).isFalse();
    ThreadPoolTaskScheduler scheduler = (ThreadPoolTaskScheduler) defaultSockJsService.getTaskScheduler();
    ScheduledThreadPoolExecutor executor = scheduler.getScheduledThreadPoolExecutor();
    assertThat(executor.getCorePoolSize()).isEqualTo(Runtime.getRuntime().availableProcessors());
    assertThat(executor.getRemoveOnCancelPolicy()).isTrue();
    interceptors = defaultSockJsService.getHandshakeInterceptors();
    assertThat(interceptors).extracting("class").containsExactly(FooTestInterceptor.class, BarTestInterceptor.class, OriginHandshakeInterceptor.class);
    assertThat(defaultSockJsService.getAllowedOrigins()).containsExactly("https://mydomain3.com", "https://mydomain4.com");
    assertThat(defaultSockJsService.getAllowedOriginPatterns()).containsExactly("https://*.mydomain.com");
    // User registry and user-destination resolution.
    SimpUserRegistry userRegistry = this.appContext.getBean(SimpUserRegistry.class);
    assertThat(userRegistry).isNotNull();
    assertThat(userRegistry.getClass()).isEqualTo(DefaultSimpUserRegistry.class);
    UserDestinationResolver userDestResolver = this.appContext.getBean(UserDestinationResolver.class);
    assertThat(userDestResolver).isNotNull();
    assertThat(userDestResolver).isInstanceOf(DefaultUserDestinationResolver.class);
    DefaultUserDestinationResolver defaultUserDestResolver = (DefaultUserDestinationResolver) userDestResolver;
    assertThat(defaultUserDestResolver.getDestinationPrefix()).isEqualTo("/personal/");
    UserDestinationMessageHandler userDestHandler = this.appContext.getBean(UserDestinationMessageHandler.class);
    assertThat(userDestHandler).isNotNull();
    // Simple broker: prefixes, selector header, heartbeat, and publish ordering.
    SimpleBrokerMessageHandler brokerMessageHandler = this.appContext.getBean(SimpleBrokerMessageHandler.class);
    assertThat(brokerMessageHandler).isNotNull();
    Collection<String> prefixes = brokerMessageHandler.getDestinationPrefixes();
    assertThat(new ArrayList<>(prefixes)).isEqualTo(Arrays.asList("/topic", "/queue"));
    DefaultSubscriptionRegistry registry = (DefaultSubscriptionRegistry) brokerMessageHandler.getSubscriptionRegistry();
    assertThat(registry.getSelectorHeaderName()).isEqualTo("my-selector");
    assertThat(brokerMessageHandler.getTaskScheduler()).isNotNull();
    assertThat(brokerMessageHandler.getHeartbeatValue()).isEqualTo(new long[] { 15000, 15000 });
    assertThat(brokerMessageHandler.isPreservePublishOrder()).isTrue();
    // Channel subscribers and executor settings for inbound/outbound/broker channels.
    List<Class<? extends MessageHandler>> subscriberTypes = Arrays.asList(SimpAnnotationMethodMessageHandler.class, UserDestinationMessageHandler.class, SimpleBrokerMessageHandler.class);
    testChannel("clientInboundChannel", subscriberTypes, 2);
    testExecutor("clientInboundChannel", Runtime.getRuntime().availableProcessors() * 2, Integer.MAX_VALUE, 60);
    subscriberTypes = Collections.singletonList(SubProtocolWebSocketHandler.class);
    testChannel("clientOutboundChannel", subscriberTypes, 2);
    testExecutor("clientOutboundChannel", Runtime.getRuntime().availableProcessors() * 2, Integer.MAX_VALUE, 60);
    subscriberTypes = Arrays.asList(SimpleBrokerMessageHandler.class, UserDestinationMessageHandler.class);
    testChannel("brokerChannel", subscriberTypes, 1);
    // No dedicated executor is registered for the broker channel in this configuration.
    assertThatExceptionOfType(NoSuchBeanDefinitionException.class).isThrownBy(() -> this.appContext.getBean("brokerChannelExecutor", ThreadPoolTaskExecutor.class));
    assertThat(this.appContext.getBean("webSocketScopeConfigurer", CustomScopeConfigurer.class)).isNotNull();
    // The subscription registry's path matcher uses "." as the separator.
    DirectFieldAccessor accessor = new DirectFieldAccessor(registry);
    Object pathMatcher = accessor.getPropertyValue("pathMatcher");
    String pathSeparator = (String) new DirectFieldAccessor(pathMatcher).getPropertyValue("pathSeparator");
    assertThat(pathSeparator).isEqualTo(".");
}
303861.661984wildfly
/**
 * Installs the batch services for a deployment that has a module attached: the artifact
 * factory, the batch environment service, an optional JDBC job repository, and the
 * JobOperator service. Deployments without an attached module are skipped entirely.
 */
public void deploy(final DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    if (deploymentUnit.hasAttachment(Attachments.MODULE)) {
        BatchLogger.LOGGER.tracef("Processing deployment '%s' for the batch environment.", deploymentUnit.getName());
        // NOTE(review): the job XML resolver is created *before* the EAR early-return below --
        // presumably forDeployment() has side effects needed for EAR subdeployments as well;
        // confirm before reordering these two statements.
        final WildFlyJobXmlResolver jobXmlResolver = WildFlyJobXmlResolver.forDeployment(deploymentUnit);
        if (DeploymentTypeMarker.isType(DeploymentType.EAR, deploymentUnit))
            return;
        final Module module = deploymentUnit.getAttachment(Attachments.MODULE);
        final ClassLoader moduleClassLoader = module.getClassLoader();
        final ServiceTarget serviceTarget = phaseContext.getServiceTarget();
        // Batch metadata may be attached to the unit itself or inherited from its parent.
        BatchEnvironmentMetaData metaData = deploymentUnit.getAttachment(BatchAttachments.BATCH_ENVIRONMENT_META_DATA);
        if (metaData == null) {
            final DeploymentUnit parent = deploymentUnit.getParent();
            if (parent != null) {
                metaData = parent.getAttachment(BatchAttachments.BATCH_ENVIRONMENT_META_DATA);
            }
        }
        // All metadata-derived settings fall back to null when no metadata was attached.
        final JobRepository jobRepository = metaData != null ? metaData.getJobRepository() : null;
        final String jobRepositoryName = metaData != null ? metaData.getJobRepositoryName() : null;
        final String dataSourceName = metaData != null ? metaData.getDataSourceName() : null;
        final String jobExecutorName = metaData != null ? metaData.getExecutorName() : null;
        final Boolean restartJobsOnResume = metaData != null ? metaData.getRestartJobsOnResume() : null;
        final Integer executionRecordsLimit = metaData != null ? metaData.getExecutionRecordsLimit() : null;
        final CapabilityServiceSupport support = deploymentUnit.getAttachment(Attachments.CAPABILITY_SERVICE_SUPPORT);
        final String deploymentName = deploymentUnit.getName();
        final EEModuleDescription eeModuleDescription = deploymentUnit.getAttachment(org.jboss.as.ee.component.Attachments.EE_MODULE_DESCRIPTION);
        final NamespaceContextSelector namespaceContextSelector = eeModuleDescription == null ? null : eeModuleDescription.getNamespaceContextSelector();
        // Batch environment service: depends on the (optional) executor, the batch
        // configuration capability and the local transaction provider.
        final ServiceName batchEnvSN = BatchServiceNames.batchEnvironmentServiceName(deploymentUnit);
        final ServiceBuilder<?> serviceBuilder = serviceTarget.addService(batchEnvSN);
        final Consumer<SecurityAwareBatchEnvironment> batchEnvironmentConsumer = serviceBuilder.provides(batchEnvSN);
        final Supplier<JobExecutor> jobExecutorSupplier = jobExecutorName != null ? serviceBuilder.requires(Capabilities.THREAD_POOL_CAPABILITY.getCapabilityServiceName(jobExecutorName)) : null;
        final Supplier<BatchConfiguration> batchConfigurationSupplier = serviceBuilder.requires(Capabilities.BATCH_CONFIGURATION_CAPABILITY.getCapabilityServiceName());
        serviceBuilder.requires(support.getCapabilityServiceName(Capabilities.LOCAL_TRANSACTION_PROVIDER_CAPABILITY));
        // Artifact factory service; when Weld is present and this deployment is part of a
        // Weld deployment, it additionally depends on the BeanManager.
        final ServiceName artifactFactoryServiceName = BatchServiceNames.batchArtifactFactoryServiceName(deploymentUnit);
        final ServiceBuilder<?> artifactFactoryServiceBuilder = serviceTarget.addService(artifactFactoryServiceName);
        final Consumer<ArtifactFactory> artifactFactoryConsumer = artifactFactoryServiceBuilder.provides(artifactFactoryServiceName);
        Supplier<BeanManager> beanManagerSupplier = null;
        if (support.hasCapability(WELD_CAPABILITY_NAME)) {
            final WeldCapability api = support.getOptionalCapabilityRuntimeAPI(WELD_CAPABILITY_NAME, WeldCapability.class).get();
            if (api.isPartOfWeldDeployment(deploymentUnit)) {
                BatchLogger.LOGGER.tracef("Adding BeanManager service dependency for deployment %s", deploymentUnit.getName());
                beanManagerSupplier = api.addBeanManagerService(deploymentUnit, artifactFactoryServiceBuilder);
            }
        }
        final ArtifactFactoryService artifactFactoryService = new ArtifactFactoryService(artifactFactoryConsumer, beanManagerSupplier);
        artifactFactoryServiceBuilder.setInstance(artifactFactoryService);
        artifactFactoryServiceBuilder.install();
        final Supplier<WildFlyArtifactFactory> artifactFactorySupplier = serviceBuilder.requires(artifactFactoryServiceName);
        // Job repository resolution, in priority order: a named repository capability,
        // a data source (a per-deployment JDBC repository service is installed), or a
        // repository instance supplied directly via the metadata.
        Supplier<JobRepository> jobRepositorySupplier = null;
        if (jobRepositoryName != null) {
            jobRepositorySupplier = serviceBuilder.requires(support.getCapabilityServiceName(Capabilities.JOB_REPOSITORY_CAPABILITY.getName(), jobRepositoryName));
        } else if (dataSourceName != null) {
            final ServiceName jobRepositoryServiceName = support.getCapabilityServiceName(Capabilities.JOB_REPOSITORY_CAPABILITY.getName(), deploymentName);
            final ServiceBuilder<?> jobRepositoryServiceBuilder = serviceTarget.addService(jobRepositoryServiceName);
            final Consumer<JobRepository> jobRepositoryConsumer = jobRepositoryServiceBuilder.provides(jobRepositoryServiceName);
            final Supplier<ExecutorService> executorSupplier = Services.requireServerExecutor(jobRepositoryServiceBuilder);
            final Supplier<DataSource> dataSourceSupplier = jobRepositoryServiceBuilder.requires(support.getCapabilityServiceName(Capabilities.DATA_SOURCE_CAPABILITY, dataSourceName));
            final JdbcJobRepositoryService jdbcJobRepositoryService = new JdbcJobRepositoryService(jobRepositoryConsumer, dataSourceSupplier, executorSupplier, executionRecordsLimit);
            jobRepositoryServiceBuilder.setInstance(jdbcJobRepositoryService);
            jobRepositoryServiceBuilder.install();
            jobRepositorySupplier = serviceBuilder.requires(jobRepositoryServiceName);
        } else if (jobRepository != null) {
            jobRepositorySupplier = () -> jobRepository;
        }
        // rcPresent / requestControllerServiceName are declared outside this method.
        final Supplier<RequestController> requestControllerSupplier = rcPresent ? serviceBuilder.requires(requestControllerServiceName(support)) : null;
        final BatchEnvironmentService service = new BatchEnvironmentService(batchEnvironmentConsumer, artifactFactorySupplier, jobExecutorSupplier, requestControllerSupplier, jobRepositorySupplier, batchConfigurationSupplier, moduleClassLoader, jobXmlResolver, deploymentName, namespaceContextSelector);
        serviceBuilder.setInstance(service);
        serviceBuilder.install();
        // JobOperator service: depends on server executor, batch configuration,
        // suspend controller, process-state notifier and the batch environment above.
        final ServiceName jobOperatorServiceName = BatchServiceNames.jobOperatorServiceName(deploymentUnit);
        final ServiceBuilder<?> jobOperatorServiceSB = serviceTarget.addService(jobOperatorServiceName);
        final Consumer<JobOperator> jobOperatorConsumer = jobOperatorServiceSB.provides(jobOperatorServiceName);
        final Supplier<ExecutorService> executorSupplier = Services.requireServerExecutor(jobOperatorServiceSB);
        final Supplier<BatchConfiguration> batchConfigSupplier = jobOperatorServiceSB.requires(support.getCapabilityServiceName(Capabilities.BATCH_CONFIGURATION_CAPABILITY.getName()));
        final Supplier<SuspendController> suspendControllerSupplier = jobOperatorServiceSB.requires(support.getCapabilityServiceName(Capabilities.SUSPEND_CONTROLLER_CAPABILITY));
        final Supplier<ProcessStateNotifier> processStateSupplier = jobOperatorServiceSB.requires(support.getCapabilityServiceName(Capabilities.PROCESS_STATE_NOTIFIER_CAPABILITY));
        final Supplier<SecurityAwareBatchEnvironment> batchEnvironmentSupplier = jobOperatorServiceSB.requires(BatchServiceNames.batchEnvironmentServiceName(deploymentUnit));
        final JobOperatorService jobOperatorService = new JobOperatorService(jobOperatorConsumer, batchConfigSupplier, batchEnvironmentSupplier, executorSupplier, suspendControllerSupplier, processStateSupplier, restartJobsOnResume, deploymentName, jobXmlResolver);
        jobOperatorServiceSB.setInstance(jobOperatorService);
        jobOperatorServiceSB.install();
        // Expose the operator to later phases and register it in the operator context
        // keyed by the module class loader ("selector" is declared outside this method).
        deploymentUnit.putAttachment(BatchAttachments.JOB_OPERATOR, jobOperatorService);
        deploymentUnit.addToAttachmentList(DEPLOYMENT_COMPLETE_SERVICES, jobOperatorServiceName);
        selector.registerContext(moduleClassLoader, JobOperatorContext.create(jobOperatorService));
    }
}
302763.792284wildfly
/**
 * Installs the runtime services for a JDBC driver resource.
 * <p>
 * The driver name is taken from the resource address and must match any explicit
 * {@code driver-name} attribute. The configured module is loaded, any configured
 * datasource / XA-datasource classes are validated against the module's class loader,
 * and the driver is located either via the {@link java.util.ServiceLoader} mechanism
 * (when no driver class name is configured) or by instantiating the configured driver
 * class directly.
 *
 * @param context   the current operation context
 * @param operation the operation being executed; must carry the resource address
 * @param model     the resolved resource model
 * @throws OperationFailedException if the module cannot be loaded or the driver class
 *         cannot be instantiated
 */
protected void performRuntime(OperationContext context, ModelNode operation, ModelNode model) throws OperationFailedException {
    final ModelNode address = operation.require(OP_ADDR);
    final String driverName = PathAddress.pathAddress(address).getLastElement().getValue();
    // The resource address is authoritative; an explicit driver-name attribute may not disagree.
    if (operation.get(DRIVER_NAME.getName()).isDefined() && !driverName.equals(operation.get(DRIVER_NAME.getName()).asString())) {
        throw ConnectorLogger.ROOT_LOGGER.driverNameAndResourceNameNotEquals(operation.get(DRIVER_NAME.getName()).asString(), driverName);
    }
    String moduleName = DRIVER_MODULE_NAME.resolveModelAttribute(context, model).asString();
    // Optional attributes resolve to null when undefined in the model.
    final Integer majorVersion = model.hasDefined(DRIVER_MAJOR_VERSION.getName()) ? DRIVER_MAJOR_VERSION.resolveModelAttribute(context, model).asInt() : null;
    final Integer minorVersion = model.hasDefined(DRIVER_MINOR_VERSION.getName()) ? DRIVER_MINOR_VERSION.resolveModelAttribute(context, model).asInt() : null;
    final String driverClassName = model.hasDefined(DRIVER_CLASS_NAME.getName()) ? DRIVER_CLASS_NAME.resolveModelAttribute(context, model).asString() : null;
    final String dataSourceClassName = model.hasDefined(DRIVER_DATASOURCE_CLASS_NAME.getName()) ? DRIVER_DATASOURCE_CLASS_NAME.resolveModelAttribute(context, model).asString() : null;
    final String xaDataSourceClassName = model.hasDefined(DRIVER_XA_DATASOURCE_CLASS_NAME.getName()) ? DRIVER_XA_DATASOURCE_CLASS_NAME.resolveModelAttribute(context, model).asString() : null;
    final ServiceTarget target = context.getServiceTarget();
    final ModuleIdentifier moduleId;
    final Module module;
    String slot = model.hasDefined(MODULE_SLOT.getName()) ? MODULE_SLOT.resolveModelAttribute(context, model).asString() : null;
    try {
        moduleId = ModuleIdentifier.create(moduleName, slot);
        module = Module.getCallerModuleLoader().loadModule(moduleId);
    } catch (ModuleNotFoundException e) {
        throw new OperationFailedException(ConnectorLogger.ROOT_LOGGER.missingDependencyInModuleDriver(moduleName, e.getMessage()), e);
    } catch (ModuleLoadException e) {
        throw new OperationFailedException(ConnectorLogger.ROOT_LOGGER.failedToLoadModuleDriver(moduleName), e);
    }
    // Validate that the configured datasource class exists in the module and implements DataSource.
    if (dataSourceClassName != null) {
        Class<? extends DataSource> dsCls;
        try {
            dsCls = module.getClassLoader().loadClass(dataSourceClassName).asSubclass(DataSource.class);
        } catch (ClassNotFoundException e) {
            throw SUBSYSTEM_DATASOURCES_LOGGER.failedToLoadDataSourceClass(dataSourceClassName, e);
        } catch (ClassCastException e) {
            throw SUBSYSTEM_DATASOURCES_LOGGER.notAValidDataSourceClass(dataSourceClassName, DataSource.class.getName());
        }
        checkDSCls(dsCls, DataSource.class);
    }
    // Same validation for the XA datasource class.
    if (xaDataSourceClassName != null) {
        Class<? extends XADataSource> dsCls;
        try {
            dsCls = module.getClassLoader().loadClass(xaDataSourceClassName).asSubclass(XADataSource.class);
        } catch (ClassNotFoundException e) {
            throw SUBSYSTEM_DATASOURCES_LOGGER.failedToLoadDataSourceClass(xaDataSourceClassName, e);
        } catch (ClassCastException e) {
            throw SUBSYSTEM_DATASOURCES_LOGGER.notAValidDataSourceClass(xaDataSourceClassName, XADataSource.class.getName());
        }
        checkDSCls(dsCls, XADataSource.class);
    }
    if (driverClassName == null) {
        // No explicit driver class: take the first java.sql.Driver the module advertises
        // through META-INF/services (ServiceLoader).
        final ServiceLoader<Driver> serviceLoader = module.loadService(Driver.class);
        boolean driverLoaded = false;
        if (serviceLoader != null) {
            // Some drivers consult the TCCL during initialization, so point it at the module.
            ClassLoader tccl = Thread.currentThread().getContextClassLoader();
            Thread.currentThread().setContextClassLoader(module.getClassLoader());
            try {
                for (Driver driver : serviceLoader) {
                    startDriverServices(target, moduleId, driver, driverName, majorVersion, minorVersion, dataSourceClassName, xaDataSourceClassName);
                    driverLoaded = true;
                    // Only the first advertised driver is installed.
                    break;
                }
            } finally {
                Thread.currentThread().setContextClassLoader(tccl);
            }
        }
        // Best-effort: a missing driver is logged, not treated as an operation failure.
        if (!driverLoaded)
            SUBSYSTEM_DATASOURCES_LOGGER.cannotFindDriverClassName(driverName);
    } else {
        try {
            final Class<? extends Driver> driverClass = module.getClassLoader().loadClass(driverClassName).asSubclass(Driver.class);
            ClassLoader tccl = Thread.currentThread().getContextClassLoader();
            Driver driver = null;
            try {
                Thread.currentThread().setContextClassLoader(module.getClassLoader());
                final Constructor<? extends Driver> constructor = driverClass.getConstructor();
                driver = constructor.newInstance();
            } finally {
                Thread.currentThread().setContextClassLoader(tccl);
            }
            startDriverServices(target, moduleId, driver, driverName, majorVersion, minorVersion, dataSourceClassName, xaDataSourceClassName);
        } catch (Exception e) {
            SUBSYSTEM_DATASOURCES_LOGGER.cannotInstantiateDriverClass(driverClassName, e);
            // FIX: propagate the underlying exception as the cause instead of discarding it,
            // so the management client / server log can see why instantiation failed.
            throw new OperationFailedException(ConnectorLogger.ROOT_LOGGER.cannotInstantiateDriverClass(driverClassName), e);
        }
    }
}
302733.282484wildfly
/**
 * Renders this datasource definition as an XML-like {@code <datasource>} fragment.
 * <p>
 * Attributes and child elements are emitted only when the corresponding field is set:
 * nullable fields ({@code jndiName}, {@code poolName}, {@code enabled},
 * {@code useJavaContext}, and the child elements) are emitted when non-null, while the
 * primitive flags {@code spy}, {@code useCcm} and {@code jta} are emitted only when true.
 *
 * @return the XML-like string representation of this datasource
 */
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("<datasource");
    if (jndiName != null)
        sb.append(" ").append(Attribute.JNDI_NAME).append("=\"").append(jndiName).append("\"");
    if (poolName != null)
        sb.append(" ").append(Attribute.POOL_NAME).append("=\"").append(poolName).append("\"");
    if (enabled != null)
        sb.append(" ").append(Attribute.ENABLED).append("=\"").append(enabled).append("\"");
    if (useJavaContext != null) {
        sb.append(" ").append(Attribute.USE_JAVA_CONTEXT);
        sb.append("=\"").append(useJavaContext).append("\"");
    }
    if (spy)
        sb.append(" ").append(Attribute.SPY).append("=\"").append(spy).append("\"");
    if (useCcm)
        sb.append(" ").append(Attribute.USE_CCM).append("=\"").append(useCcm).append("\"");
    if (jta)
        sb.append(" ").append(Attribute.JTA).append("=\"").append(jta).append("\"");
    sb.append(">");
    if (connectionUrl != null) {
        sb.append("<").append(Tag.CONNECTION_URL).append(">");
        sb.append(connectionUrl);
        sb.append("</").append(Tag.CONNECTION_URL).append(">");
    }
    if (driverClass != null) {
        sb.append("<").append(Tag.DRIVER_CLASS).append(">");
        sb.append(driverClass);
        sb.append("</").append(Tag.DRIVER_CLASS).append(">");
    }
    if (dataSourceClass != null) {
        sb.append("<").append(Tag.DATASOURCE_CLASS).append(">");
        sb.append(dataSourceClass);
        sb.append("</").append(Tag.DATASOURCE_CLASS).append(">");
    }
    if (driver != null) {
        sb.append("<").append(Tag.DRIVER).append(">");
        sb.append(driver);
        sb.append("</").append(Tag.DRIVER).append(">");
    }
    // One <connection-property name="..."> element per configured property.
    if (connectionProperties != null && !connectionProperties.isEmpty()) {
        for (Map.Entry<String, String> entry : connectionProperties.entrySet()) {
            sb.append("<").append(Tag.CONNECTION_PROPERTY);
            sb.append(" name=\"").append(entry.getKey()).append("\">");
            sb.append(entry.getValue());
            sb.append("</").append(Tag.CONNECTION_PROPERTY).append(">");
        }
    }
    if (newConnectionSql != null) {
        sb.append("<").append(Tag.NEW_CONNECTION_SQL).append(">");
        sb.append(newConnectionSql);
        sb.append("</").append(Tag.NEW_CONNECTION_SQL).append(">");
    }
    if (transactionIsolation != null) {
        sb.append("<").append(Tag.TRANSACTION_ISOLATION).append(">");
        sb.append(transactionIsolation);
        sb.append("</").append(Tag.TRANSACTION_ISOLATION).append(">");
    }
    if (urlDelimiter != null) {
        sb.append("<").append(Tag.URL_DELIMITER).append(">");
        sb.append(urlDelimiter);
        sb.append("</").append(Tag.URL_DELIMITER).append(">");
    }
    if (urlSelectorStrategyClassName != null) {
        sb.append("<").append(Tag.URL_SELECTOR_STRATEGY_CLASS_NAME).append(">");
        sb.append(urlSelectorStrategyClassName);
        sb.append("</").append(Tag.URL_SELECTOR_STRATEGY_CLASS_NAME).append(">");
    }
    // Nested sections delegate to their own toString() implementations.
    if (pool != null)
        sb.append(pool);
    if (security != null)
        sb.append(security);
    if (validation != null)
        sb.append(validation);
    if (timeOut != null)
        sb.append(timeOut);
    if (statement != null)
        sb.append(statement);
    sb.append("</datasource>");
    return sb.toString();
}
304221.591884wildfly
/**
 * Assembles a {@link ModifiableConnDef} metadata object from the resolved attributes
 * of a connection-definition {@link ModelNode}.
 *
 * @param context the operation context used to resolve attribute expressions
 * @param connDefModel the connection-definition model node
 * @param poolName the pool name to associate with the definition
 * @param isXa whether an XA-capable pool should be built
 * @param recoveryCredentialSourceSupplier supplier of the recovery credential source; may be {@code null}
 * @return the assembled connection definition
 * @throws OperationFailedException if attribute resolution fails or a legacy security
 *         domain attribute is configured (no longer supported)
 * @throws ValidateException if the assembled metadata is invalid
 */
public static ModifiableConnDef buildConnectionDefinitionObject(final OperationContext context, final ModelNode connDefModel, final String poolName, final boolean isXa, ExceptionSupplier<CredentialSource, Exception> recoveryCredentialSourceSupplier) throws OperationFailedException, ValidateException {
    final Map<String, String> configProperties = new HashMap<>(0);
    // Basic connection-definition attributes.
    final String connectionClassName = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, CLASS_NAME);
    final String jndi = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, JNDI_NAME);
    final boolean isEnabled = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, ENABLED);
    final boolean isConnectable = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, CONNECTABLE);
    final Boolean trackingValue = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, TRACKING);
    final boolean javaContext = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, USE_JAVA_CONTEXT);
    final boolean ccm = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, USE_CCM);
    final boolean isSharable = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, SHARABLE);
    final boolean isEnlistment = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, ENLISTMENT);
    final String managedConnectionPool = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, MCP);
    final Boolean enlistmentTraceValue = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, ENLISTMENT_TRACE);
    // Pool sizing and behavior.
    final int maxPool = ModelNodeUtil.getIntIfSetOrGetDefault(context, connDefModel, MAX_POOL_SIZE);
    final int minPool = ModelNodeUtil.getIntIfSetOrGetDefault(context, connDefModel, MIN_POOL_SIZE);
    final Integer initialPool = ModelNodeUtil.getIntIfSetOrGetDefault(context, connDefModel, INITIAL_POOL_SIZE);
    final boolean poolPrefill = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, POOL_PREFILL);
    final boolean poolFair = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, POOL_FAIR);
    final boolean strictMin = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, POOL_USE_STRICT_MIN);
    final FlushStrategy flushStrategy = FlushStrategy.forName(POOL_FLUSH_STRATEGY.resolveModelAttribute(context, connDefModel).asString());
    // XA-specific pool attributes (only consumed when isXa is true).
    final Boolean sameRmOverride = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, SAME_RM_OVERRIDE);
    final boolean interleaving = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, INTERLEAVING);
    final boolean padXid = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, PAD_XID);
    final boolean wrapXaResource = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, WRAP_XA_RESOURCE);
    final boolean noTxSeparatePool = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, NOTXSEPARATEPOOL);
    // Timeout configuration.
    final Integer allocationRetry = ModelNodeUtil.getIntIfSetOrGetDefault(context, connDefModel, ALLOCATION_RETRY);
    final Long allocationRetryWaitMillis = ModelNodeUtil.getLongIfSetOrGetDefault(context, connDefModel, ALLOCATION_RETRY_WAIT_MILLIS);
    final Long blockingTimeoutMillis = ModelNodeUtil.getLongIfSetOrGetDefault(context, connDefModel, BLOCKING_TIMEOUT_WAIT_MILLIS);
    final Long idleTimeoutMinutes = ModelNodeUtil.getLongIfSetOrGetDefault(context, connDefModel, IDLETIMEOUTMINUTES);
    final Integer xaResourceTimeout = ModelNodeUtil.getIntIfSetOrGetDefault(context, connDefModel, XA_RESOURCE_TIMEOUT);
    final TimeOut timeOut = new TimeOutImpl(blockingTimeoutMillis, idleTimeoutMinutes, allocationRetry, allocationRetryWaitMillis, xaResourceTimeout);
    // Capacity policy built from the incrementer/decrementer extensions.
    final Extension incrementer = ModelNodeUtil.extractExtension(context, connDefModel, CAPACITY_INCREMENTER_CLASS, CAPACITY_INCREMENTER_PROPERTIES);
    final Extension decrementer = ModelNodeUtil.extractExtension(context, connDefModel, CAPACITY_DECREMENTER_CLASS, CAPACITY_DECREMENTER_PROPERTIES);
    final Capacity capacity = new Capacity(incrementer, decrementer);
    final Pool pool = isXa
            ? new XaPoolImpl(minPool, initialPool, maxPool, poolPrefill, strictMin, flushStrategy, capacity, poolFair, sameRmOverride, interleaving, padXid, wrapXaResource, noTxSeparatePool)
            : new PoolImpl(minPool, initialPool, maxPool, poolPrefill, strictMin, flushStrategy, capacity, poolFair);
    // Legacy (PicketBox) security domains are no longer supported; reject them outright.
    final String legacySecurityDomain = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, SECURITY_DOMAIN);
    final String legacySecurityDomainAndApplication = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, SECURITY_DOMAIN_AND_APPLICATION);
    if (legacySecurityDomain != null || legacySecurityDomainAndApplication != null) {
        throw new OperationFailedException(SUBSYSTEM_RA_LOGGER.legacySecurityNotSupported());
    }
    // Elytron-based security: only materialized when at least one attribute is set.
    final String authContext = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, AUTHENTICATION_CONTEXT);
    final String authContextAndApplication = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, AUTHENTICATION_CONTEXT_AND_APPLICATION);
    final boolean useApplication = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, APPLICATION);
    final Security security = (authContext != null || authContextAndApplication != null || useApplication)
            ? new SecurityImpl(authContext, authContextAndApplication, useApplication)
            : null;
    // Validation configuration.
    final Long bgValidationMillis = ModelNodeUtil.getLongIfSetOrGetDefault(context, connDefModel, BACKGROUNDVALIDATIONMILLIS);
    final Boolean bgValidation = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, BACKGROUNDVALIDATION);
    final boolean fastFail = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, USE_FAST_FAIL);
    final Boolean validateOnMatch = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, VALIDATE_ON_MATCH);
    final Validation validation = new ValidationImpl(validateOnMatch, bgValidation, bgValidationMillis, fastFail);
    // Recovery configuration; a legacy recovery security domain is likewise rejected.
    final String recoveryUser = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, RECOVERY_USERNAME);
    final String recoveryPass = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, RECOVERY_PASSWORD);
    final String legacyRecoveryDomain = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, RECOVERY_SECURITY_DOMAIN);
    if (legacyRecoveryDomain != null) {
        throw new OperationFailedException(SUBSYSTEM_RA_LOGGER.legacySecurityNotSupported());
    }
    final String recoveryAuthContext = ModelNodeUtil.getResolvedStringIfSetOrGetDefault(context, connDefModel, RECOVERY_AUTHENTICATION_CONTEXT);
    Boolean noRecovery = ModelNodeUtil.getBooleanIfSetOrGetDefault(context, connDefModel, NO_RECOVERY);
    Recovery recovery = null;
    // A credential is created when a username plus password/credential-source is given,
    // or when an explicit recovery authentication context is configured.
    final boolean hasRecoveryCredential = (recoveryUser != null && (recoveryPass != null || recoveryCredentialSourceSupplier != null)) || recoveryAuthContext != null;
    if (hasRecoveryCredential || noRecovery != null) {
        final Credential credential = hasRecoveryCredential
                ? new CredentialImpl(recoveryUser, recoveryPass, recoveryAuthContext, recoveryCredentialSourceSupplier)
                : null;
        final Extension recoverPlugin = ModelNodeUtil.extractExtension(context, connDefModel, RECOVER_PLUGIN_CLASSNAME, RECOVER_PLUGIN_PROPERTIES);
        if (noRecovery == null) {
            noRecovery = Boolean.FALSE;
        }
        recovery = new Recovery(credential, recoverPlugin, noRecovery);
    }
    return new ModifiableConnDef(configProperties, connectionClassName, jndi, poolName, isEnabled, javaContext, ccm, pool, timeOut, validation, security, recovery, isSharable, isEnlistment, isConnectable, trackingValue, managedConnectionPool, enlistmentTraceValue);
}
302174.742492wildfly
/**
 * Parses a work-manager {@code <security>} element from the XML stream.
 * <p>
 * All parsed values are written into {@code operation} via
 * {@code parseAndSetParameter}; the returned {@link WorkManagerSecurity} object
 * mostly carries defaults (only {@code domain} is mirrored into it here), which
 * preserves the behavior callers rely on.
 *
 * @param operation the model node that receives the parsed attribute values
 * @param reader the XML stream reader, positioned at the {@code <security>} element
 * @return the work-manager security metadata built while parsing
 * @throws XMLStreamException on underlying XML errors
 * @throws ParserException on an unexpected element/end tag or premature end of document
 */
protected WorkManagerSecurity parseWorkManagerSecurity(final ModelNode operation, final XMLStreamReader reader) throws XMLStreamException, ParserException {
    boolean mappingRequired = false;
    String domain = null;
    String defaultPrincipal = null;
    List<String> defaultGroups = null;
    Map<String, String> userMappings = null;
    Map<String, String> groupMappings = null;
    // Toggled by <users>/<groups> so that nested <map> elements land in the right list.
    boolean userMappingEnabled = false;
    WM_SECURITY.parseAndSetParameter("true", operation, reader);
    while (reader.hasNext()) {
        switch (reader.nextTag()) {
            case END_ELEMENT:
                {
                    if (WorkManager.Tag.forName(reader.getLocalName()) == WorkManager.Tag.SECURITY) {
                        // Matching </security>: parsing of this element is complete.
                        return new WorkManagerSecurityImpl(mappingRequired, domain, false, defaultPrincipal, defaultGroups, userMappings, groupMappings);
                    } else {
                        if (WorkManagerSecurity.Tag.forName(reader.getLocalName()) == WorkManagerSecurity.Tag.UNKNOWN) {
                            throw new ParserException(bundle.unexpectedEndTag(reader.getLocalName()));
                        }
                    }
                    break;
                }
            case START_ELEMENT:
                {
                    switch (WorkManagerSecurity.Tag.forName(reader.getLocalName())) {
                        case DEFAULT_GROUPS:
                        case MAPPINGS:
                            {
                                // Pure container elements; their children are handled below.
                                break;
                            }
                        case MAPPING_REQUIRED:
                            {
                                String value = rawElementText(reader);
                                WM_SECURITY_MAPPING_REQUIRED.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DOMAIN:
                            {
                                // Mirrored into the returned metadata object as well as the model.
                                String value = domain = rawElementText(reader);
                                WM_SECURITY_DOMAIN.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case DEFAULT_PRINCIPAL:
                            {
                                String value = rawElementText(reader);
                                WM_SECURITY_DEFAULT_PRINCIPAL.parseAndSetParameter(value, operation, reader);
                                break;
                            }
                        case GROUP:
                            {
                                String value = rawElementText(reader);
                                operation.get(WM_SECURITY_DEFAULT_GROUPS.getName()).add(parse(WM_SECURITY_DEFAULT_GROUP, value, reader));
                                break;
                            }
                        case USERS:
                            {
                                userMappingEnabled = true;
                                break;
                            }
                        case GROUPS:
                            {
                                userMappingEnabled = false;
                                break;
                            }
                        case MAP:
                            {
                                // <map from=".." to=".."/> — identical shape for both lists;
                                // only the destination attribute differs.
                                ModelNode mapping = parseWorkManagerMapping(reader);
                                if (userMappingEnabled) {
                                    operation.get(WM_SECURITY_MAPPING_USERS.getName()).add(mapping);
                                } else {
                                    operation.get(WM_SECURITY_MAPPING_GROUPS.getName()).add(mapping);
                                }
                                break;
                            }
                        default:
                            throw new ParserException(bundle.unexpectedElement(reader.getLocalName()));
                    }
                    break;
                }
        }
    }
    throw new ParserException(bundle.unexpectedEndOfDocument());
}

/**
 * Parses a single {@code <map from="..." to="..."/>} element shared by the user
 * and group mapping lists. Both attributes are required and must be non-blank.
 *
 * @param reader the XML stream reader, positioned at the {@code <map>} element
 * @return a model node holding the parsed {@code from}/{@code to} pair
 * @throws XMLStreamException on underlying XML errors
 * @throws ParserException if either attribute is missing or blank
 */
private ModelNode parseWorkManagerMapping(final XMLStreamReader reader) throws XMLStreamException, ParserException {
    String from = rawAttributeText(reader, WorkManagerSecurity.Attribute.FROM.getLocalName());
    if (from == null || from.trim().isEmpty()) {
        throw new ParserException(bundle.requiredAttributeMissing(WorkManagerSecurity.Attribute.FROM.getLocalName(), reader.getLocalName()));
    }
    String to = rawAttributeText(reader, WorkManagerSecurity.Attribute.TO.getLocalName());
    if (to == null || to.trim().isEmpty()) {
        throw new ParserException(bundle.requiredAttributeMissing(WorkManagerSecurity.Attribute.TO.getLocalName(), reader.getLocalName()));
    }
    ModelNode mapping = new ModelNode();
    WM_SECURITY_MAPPING_FROM.parseAndSetParameter(from, mapping, reader);
    WM_SECURITY_MAPPING_TO.parseAndSetParameter(to, mapping, reader);
    return mapping;
}
302649.1515102wildfly
/**
 * Installs the MSC services that make up a single EE component: the component
 * create service, the component start service, one view service per configured
 * view, and a binder service for every JNDI binding — plus an aggregator service
 * that depends on all of this component's bindings.
 * <p>
 * Fixes over the previous revision: removed the declared-but-never-used local
 * {@code contextServiceName}, and the CREATE naming-mode check is now computed once.
 *
 * @param phaseContext the current deployment phase context
 * @param configuration the component configuration being deployed
 * @param jndiDependencies receives the service name of this component's JNDI aggregator
 * @param bindingDependencyService a service the start service must wait for
 * @throws DeploymentUnitProcessingException on binding-resolution or traversal failures
 */
protected void deployComponent(final DeploymentPhaseContext phaseContext, final ComponentConfiguration configuration, final List<ServiceName> jndiDependencies, final ServiceName bindingDependencyService) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    final ServiceTarget serviceTarget = phaseContext.getServiceTarget();
    final String applicationName = configuration.getApplicationName();
    final String moduleName = configuration.getModuleName();
    final String componentName = configuration.getComponentName();
    final EEApplicationClasses applicationClasses = deploymentUnit.getAttachment(Attachments.EE_APPLICATION_CLASSES_DESCRIPTION);
    final Module module = deploymentUnit.getAttachment(org.jboss.as.server.deployment.Attachments.MODULE);
    final ServiceName createServiceName = configuration.getComponentDescription().getCreateServiceName();
    final ServiceName startServiceName = configuration.getComponentDescription().getStartServiceName();
    // Create service constructs the component; start service depends on it (below).
    final BasicComponentCreateService createService = configuration.getComponentCreateServiceFactory().constructService(configuration);
    final ServiceBuilder<Component> createBuilder = serviceTarget.addService(createServiceName, createService);
    final ComponentStartService startService = new ComponentStartService();
    final ServiceBuilder<Component> startBuilder = serviceTarget.addService(startServiceName, startService);
    deploymentUnit.addToAttachmentList(org.jboss.as.server.deployment.Attachments.DEPLOYMENT_COMPLETE_SERVICES, startServiceName);
    // Aggregator NULL service that depends on every JNDI binding of this component.
    final ServiceName jndiDepServiceName = configuration.getComponentDescription().getServiceName().append(JNDI_BINDINGS_SERVICE);
    final ServiceBuilder<Void> jndiDepServiceBuilder = serviceTarget.addService(jndiDepServiceName, Service.NULL);
    jndiDependencies.add(jndiDepServiceName);
    // Let registered configurators contribute extra dependencies to both services.
    for (DependencyConfigurator configurator : configuration.getCreateDependencies()) {
        configurator.configureDependency(createBuilder, createService);
    }
    for (DependencyConfigurator configurator : configuration.getStartDependencies()) {
        configurator.configureDependency(startBuilder, startService);
    }
    startBuilder.addDependency(createServiceName, BasicComponent.class, startService.getComponentInjector());
    Services.addServerExecutorDependency(startBuilder, startService.getExecutorInjector());
    startBuilder.requires(bindingDependencyService);
    // CREATE naming mode gets its own java:comp naming store and per-component bindings.
    final boolean createNamingMode = configuration.getComponentDescription().getNamingMode() == ComponentNamingMode.CREATE;
    if (createNamingMode) {
        final NamingStoreService contextService = new NamingStoreService(true);
        serviceTarget.addService(configuration.getComponentDescription().getContextServiceName(), contextService).install();
    }
    final InjectionSource.ResolutionContext resolutionContext = new InjectionSource.ResolutionContext(configuration.getComponentDescription().getNamingMode() == ComponentNamingMode.USE_MODULE, configuration.getComponentName(), configuration.getModuleName(), configuration.getApplicationName());
    // One view service per view; the start service waits on each of them.
    for (ViewConfiguration viewConfiguration : configuration.getViews()) {
        final ServiceName serviceName = viewConfiguration.getViewServiceName();
        final ViewService viewService = new ViewService(viewConfiguration);
        final ServiceBuilder<ComponentView> componentViewServiceBuilder = serviceTarget.addService(serviceName, viewService);
        componentViewServiceBuilder.addDependency(createServiceName, Component.class, viewService.getComponentInjector());
        for (final DependencyConfigurator<ViewService> depConfig : viewConfiguration.getDependencies()) {
            depConfig.configureDependency(componentViewServiceBuilder, viewService);
        }
        componentViewServiceBuilder.install();
        startBuilder.requires(serviceName);
        // Install a binder service for each JNDI binding of this view.
        for (BindingConfiguration bindingConfiguration : viewConfiguration.getBindingConfigurations()) {
            final String bindingName = bindingConfiguration.getName();
            final ContextNames.BindInfo bindInfo = ContextNames.bindInfoFor(applicationName, moduleName, componentName, bindingName);
            final BinderService service = new BinderService(bindInfo.getBindName(), bindingConfiguration.getSource());
            jndiDepServiceBuilder.requires(bindInfo.getBinderServiceName());
            ServiceBuilder<ManagedReferenceFactory> serviceBuilder = serviceTarget.addService(bindInfo.getBinderServiceName(), service);
            bindingConfiguration.getSource().getResourceValue(resolutionContext, serviceBuilder, phaseContext, service.getManagedObjectInjector());
            serviceBuilder.addDependency(bindInfo.getParentContextServiceName(), ServiceBasedNamingStore.class, service.getNamingStoreInjector());
            try {
                serviceBuilder.install();
            } catch (DuplicateServiceException e) {
                // Report which JNDI name collided before propagating.
                handleDuplicateService(configuration, bindInfo.getAbsoluteJndiName());
                throw e;
            }
        }
    }
    if (createNamingMode) {
        // Track already-bound names so class/interceptor traversal doesn't rebind them.
        final Set<ServiceName> bound = new HashSet<ServiceName>();
        processBindings(phaseContext, configuration, serviceTarget, resolutionContext, configuration.getComponentDescription().getBindingConfigurations(), jndiDepServiceBuilder, bound);
        // Annotation-derived bindings only apply when the deployment isn't metadata-complete.
        if (!MetadataCompleteMarker.isMetadataComplete(phaseContext.getDeploymentUnit())) {
            new ClassDescriptionTraversal(configuration.getComponentClass(), applicationClasses) {

                @Override
                protected void handle(final Class<?> clazz, final EEModuleClassDescription classDescription) throws DeploymentUnitProcessingException {
                    if (classDescription != null) {
                        processBindings(phaseContext, configuration, serviceTarget, resolutionContext, classDescription.getBindingConfigurations(), jndiDepServiceBuilder, bound);
                    }
                }
            }.run();
            // Also process bindings contributed by every interceptor class hierarchy.
            for (InterceptorDescription interceptor : configuration.getComponentDescription().getAllInterceptors()) {
                final Class<?> interceptorClass;
                try {
                    interceptorClass = module.getClassLoader().loadClass(interceptor.getInterceptorClassName());
                } catch (ClassNotFoundException e) {
                    throw EeLogger.ROOT_LOGGER.cannotLoadInterceptor(e, interceptor.getInterceptorClassName(), configuration.getComponentClass());
                }
                if (interceptorClass != null) {
                    new ClassDescriptionTraversal(interceptorClass, applicationClasses) {

                        @Override
                        protected void handle(final Class<?> clazz, final EEModuleClassDescription classDescription) throws DeploymentUnitProcessingException {
                            if (classDescription != null) {
                                processBindings(phaseContext, configuration, serviceTarget, resolutionContext, classDescription.getBindingConfigurations(), jndiDepServiceBuilder, bound);
                            }
                        }
                    }.run();
                }
            }
        }
    }
    createBuilder.install();
    startBuilder.install();
    jndiDepServiceBuilder.install();
}
302672.963676wildfly
/**
 * Builds JNDI binding configurations for the {@code <env-entry>} elements of a
 * deployment-descriptor environment.
 * <p>
 * For each entry, either a lookup-based binding ({@code <lookup-name>}) or a
 * value-based binding ({@code <env-entry-value>} parsed to the entry's type) is
 * created; specifying both is an error and specifying neither skips the entry.
 *
 * @param environment the descriptor environment holding the env-entries
 * @param classLoader class loader used to resolve env-entry and value types
 * @param deploymentReflectionIndex reflection index used when processing injection targets
 * @param resourceInjectionTarget the target whose injection points may refine the entry type
 * @return the binding configurations for all usable env-entries (possibly empty, never null)
 * @throws DeploymentUnitProcessingException if a type cannot be loaded or determined,
 *         both value and lookup are given, or a value cannot be converted
 */
private List<BindingConfiguration> getEnvironmentEntries(final DeploymentDescriptorEnvironment environment, final ClassLoader classLoader, DeploymentReflectionIndex deploymentReflectionIndex, ResourceInjectionTarget resourceInjectionTarget) throws DeploymentUnitProcessingException {
    final List<BindingConfiguration> bindings = new ArrayList<BindingConfiguration>();
    final EnvironmentEntriesMetaData envEntries = environment.getEnvironment().getEnvironmentEntries();
    if (envEntries == null) {
        return bindings;
    }
    for (final EnvironmentEntryMetaData envEntry : envEntries) {
        if (envEntry.isDependencyIgnored()) {
            continue;
        }
        // Absolute java: names are kept as-is; others are prefixed with the default context.
        final String name;
        if (envEntry.getName().startsWith("java:")) {
            name = envEntry.getName();
        } else {
            name = environment.getDefaultContext() + envEntry.getEnvEntryName();
        }
        // Resolve the declared <env-entry-type>, if any.
        Class<?> classType = null;
        if (envEntry.getType() != null) {
            try {
                classType = this.loadClass(envEntry.getType(), classLoader);
            } catch (ClassNotFoundException e) {
                throw EeLogger.ROOT_LOGGER.cannotLoad(e, envEntry.getType());
            }
        }
        final String value = envEntry.getValue();
        final String lookup = envEntry.getLookupName();
        if (!isEmpty(value) && !isEmpty(lookup)) {
            // The spec-defined elements are mutually exclusive.
            throw EeLogger.ROOT_LOGGER.cannotSpecifyBoth("<env-entry-value>", "<lookup-name>");
        } else if (isEmpty(lookup) && isEmpty(value)) {
            // Nothing to bind for this entry.
            continue;
        }
        // Injection-target processing may determine/refine the type when it was not declared.
        LookupInjectionSource injectionSource = new LookupInjectionSource(name);
        classType = processInjectionTargets(resourceInjectionTarget, injectionSource, classLoader, deploymentReflectionIndex, envEntry, classType);
        if (classType == null) {
            throw EeLogger.ROOT_LOGGER.cannotDetermineType("<env-entry>", name, "<env-entry-type>");
        }
        final String type = classType.getName();
        // Dispatch on the resolved type name; order matters — the lookup case is
        // checked first, then the spec-permitted value types.
        final BindingConfiguration bindingConfiguration;
        if (!isEmpty(lookup)) {
            bindingConfiguration = new BindingConfiguration(name, new LookupInjectionSource(lookup));
        } else if (type.equals(String.class.getName())) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(value));
        } else if (type.equals(Integer.class.getName()) || type.equals("int")) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Integer.valueOf(value)));
        } else if (type.equals(Short.class.getName()) || type.equals("short")) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Short.valueOf(value)));
        } else if (type.equals(Long.class.getName()) || type.equals("long")) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Long.valueOf(value)));
        } else if (type.equals(Byte.class.getName()) || type.equals("byte")) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Byte.valueOf(value)));
        } else if (type.equals(Double.class.getName()) || type.equals("double")) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Double.valueOf(value)));
        } else if (type.equals(Float.class.getName()) || type.equals("float")) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Float.valueOf(value)));
        } else if (type.equals(Boolean.class.getName()) || type.equals("boolean")) {
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Boolean.valueOf(value)));
        } else if (type.equals(Character.class.getName()) || type.equals("char")) {
            // A char env-entry value must be exactly one character long.
            if (value.length() != 1) {
                throw EeLogger.ROOT_LOGGER.invalidCharacterLength("env-entry", value);
            }
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(value.charAt(0)));
        } else if (type.equals(Class.class.getName())) {
            // The value names a class to be loaded and injected as a Class object.
            try {
                bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(classLoader.loadClass(value)));
            } catch (ClassNotFoundException e) {
                throw EeLogger.ROOT_LOGGER.cannotLoad(value);
            }
        } else if (classType.isEnum() || (classType.getEnclosingClass() != null && classType.getEnclosingClass().isEnum())) {
            // Covers both enum types and enum constants with class bodies (anonymous subclasses).
            bindingConfiguration = new BindingConfiguration(name, new EnvEntryInjectionSource(Enum.valueOf((Class) classType, value)));
        } else {
            throw EeLogger.ROOT_LOGGER.unknownElementType("env-entry", type);
        }
        bindings.add(bindingConfiguration);
    }
    return bindings;
}
302241.832590wildfly
/**
 * Applies deployment-descriptor interceptor/lifecycle method overrides to a bean
 * component: around-invoke methods, post-construct/pre-destroy callbacks and —
 * for stateful beans — pre-passivate/post-activate callbacks.
 * <p>
 * The previously quintuplicated "resolve declaring class and register override"
 * logic is factored into {@link #registerOverride}.
 *
 * @param component the component description to receive the overrides; no-op if it
 *        has no descriptor data
 * @param module the deployment module used to load the component class
 * @param reflectionIndex reflection index used to locate callback methods when the
 *        descriptor does not name a class
 * @throws ClassNotFoundException if the component class cannot be loaded
 * @throws DeploymentUnitProcessingException if a named callback method cannot be found
 */
private void handleSessionBean(final EJBComponentDescription component, final Module module, final DeploymentReflectionIndex reflectionIndex) throws ClassNotFoundException, DeploymentUnitProcessingException {
    if (component.getDescriptorData() == null) {
        return;
    }
    final Class<?> componentClass = ClassLoadingUtils.loadClass(component.getComponentClassName(), module);
    final EnterpriseBeanMetaData metaData = component.getDescriptorData();
    // Around-invoke metadata lives on the concrete metadata subtype.
    AroundInvokesMetaData aroundInvokes = null;
    if (metaData instanceof SessionBeanMetaData) {
        aroundInvokes = ((SessionBeanMetaData) metaData).getAroundInvokes();
    } else if (metaData instanceof MessageDrivenBeanMetaData) {
        aroundInvokes = ((MessageDrivenBeanMetaData) metaData).getAroundInvokes();
    }
    if (aroundInvokes != null) {
        for (final AroundInvokeMetaData aroundInvoke : aroundInvokes) {
            final InterceptorClassDescription.Builder builder = InterceptorClassDescription.builder();
            // Around-invoke methods take an InvocationContext and return Object.
            final MethodIdentifier methodIdentifier = MethodIdentifier.getIdentifier(Object.class, aroundInvoke.getMethodName(), InvocationContext.class);
            builder.setAroundInvoke(methodIdentifier);
            registerOverride(component, componentClass, reflectionIndex, aroundInvoke.getClassName(), methodIdentifier, builder);
        }
    }
    // Lifecycle callbacks are void no-arg methods.
    final LifecycleCallbacksMetaData postConstructs = metaData.getPostConstructs();
    if (postConstructs != null) {
        for (final LifecycleCallbackMetaData postConstruct : postConstructs) {
            final InterceptorClassDescription.Builder builder = InterceptorClassDescription.builder();
            final MethodIdentifier methodIdentifier = MethodIdentifier.getIdentifier(void.class, postConstruct.getMethodName());
            builder.setPostConstruct(methodIdentifier);
            registerOverride(component, componentClass, reflectionIndex, postConstruct.getClassName(), methodIdentifier, builder);
        }
    }
    final LifecycleCallbacksMetaData preDestroys = metaData.getPreDestroys();
    if (preDestroys != null) {
        for (final LifecycleCallbackMetaData preDestroy : preDestroys) {
            final InterceptorClassDescription.Builder builder = InterceptorClassDescription.builder();
            final MethodIdentifier methodIdentifier = MethodIdentifier.getIdentifier(void.class, preDestroy.getMethodName());
            builder.setPreDestroy(methodIdentifier);
            registerOverride(component, componentClass, reflectionIndex, preDestroy.getClassName(), methodIdentifier, builder);
        }
    }
    // Passivation callbacks only apply to stateful session beans.
    if (component.isStateful()) {
        final SessionBeanMetaData sessionBeanMetadata = (SessionBeanMetaData) metaData;
        final LifecycleCallbacksMetaData prePassivates = sessionBeanMetadata.getPrePassivates();
        if (prePassivates != null) {
            for (final LifecycleCallbackMetaData prePassivate : prePassivates) {
                final InterceptorClassDescription.Builder builder = InterceptorClassDescription.builder();
                final MethodIdentifier methodIdentifier = MethodIdentifier.getIdentifier(void.class, prePassivate.getMethodName());
                builder.setPrePassivate(methodIdentifier);
                registerOverride(component, componentClass, reflectionIndex, prePassivate.getClassName(), methodIdentifier, builder);
            }
        }
        final LifecycleCallbacksMetaData postActivates = sessionBeanMetadata.getPostActivates();
        if (postActivates != null) {
            for (final LifecycleCallbackMetaData postActivate : postActivates) {
                final InterceptorClassDescription.Builder builder = InterceptorClassDescription.builder();
                final MethodIdentifier methodIdentifier = MethodIdentifier.getIdentifier(void.class, postActivate.getMethodName());
                builder.setPostActivate(methodIdentifier);
                registerOverride(component, componentClass, reflectionIndex, postActivate.getClassName(), methodIdentifier, builder);
            }
        }
    }
}

/**
 * Registers a configured interceptor method override on {@code component}.
 * When the descriptor did not name a class (null/empty), the declaring class of
 * the callback method is resolved against the bean class via the reflection index.
 *
 * @param component the component to register the override on
 * @param componentClass the bean class used for method resolution
 * @param reflectionIndex reflection index used to locate the method
 * @param declaredClassName the class name from the descriptor; may be null or empty
 * @param methodIdentifier identifier of the callback method
 * @param builder builder already populated with the override kind
 * @throws DeploymentUnitProcessingException if the method cannot be found on the bean class
 */
private static void registerOverride(final EJBComponentDescription component, final Class<?> componentClass, final DeploymentReflectionIndex reflectionIndex, final String declaredClassName, final MethodIdentifier methodIdentifier, final InterceptorClassDescription.Builder builder) throws DeploymentUnitProcessingException {
    final String targetClassName;
    if (declaredClassName == null || declaredClassName.isEmpty()) {
        targetClassName = ClassReflectionIndexUtil.findRequiredMethod(reflectionIndex, componentClass, methodIdentifier).getDeclaringClass().getName();
    } else {
        targetClassName = declaredClassName;
    }
    component.addInterceptorMethodOverride(targetClassName, builder.build());
}
301749.5923104wildfly
/**
 * Parses one persisted calendar-based timer element from the file-based timer store and, once
 * the element's END_ELEMENT is reached, rebuilds the timer and appends it to {@code timers}.
 * <p>
 * The timer identity/state attributes and all seven schedule-expression fields (second, minute,
 * hour, day-of-week, day-of-month, month, year) are required; start/end dates and timezone are
 * optional. Start/end dates are persisted as epoch milliseconds. A nested timeout-method element
 * marks the timer as an auto-timer; if its method can no longer be resolved against the current
 * class loader the timer is still added (with id "deleted-timer") and the failure is logged so
 * reinstatement problems are visible rather than silently dropped.
 *
 * @param reader reader positioned on the calendar timer's START_ELEMENT
 * @param timers list that receives the reconstructed timer
 * @throws XMLStreamException on malformed XML, an unexpected attribute/element, or a missing
 *         required attribute
 */
private void parseCalendarTimer(XMLExtendedStreamReader reader, List<TimerImpl> timers) throws XMLStreamException {
    LoadableElements loadableElements = new LoadableElements();
    CalendarTimer.Builder builder = CalendarTimer.builder();
    // Defaults: not an auto-timer unless a timeout-method child element is seen below;
    // timers from this store are always persistent.
    builder.setAutoTimer(false).setPersistent(true);
    // Attributes that must appear on this element; each one encountered is removed,
    // so anything left over at the end of the attribute loop is missing.
    final Set<String> required = new HashSet<>(Arrays.asList(new String[] { TIMED_OBJECT_ID, TIMER_ID, TIMER_STATE, SCHEDULE_EXPR_SECOND, SCHEDULE_EXPR_MINUTE, SCHEDULE_EXPR_HOUR, SCHEDULE_EXPR_DAY_OF_WEEK, SCHEDULE_EXPR_DAY_OF_MONTH, SCHEDULE_EXPR_MONTH, SCHEDULE_EXPR_YEAR }));
    final ScheduleExpression scheduleExpression = new ScheduleExpression();
    for (int i = 0; i < reader.getAttributeCount(); ++i) {
        String attr = reader.getAttributeValue(i);
        String attrName = reader.getAttributeLocalName(i);
        required.remove(attrName);
        // Attributes shared by all timer kinds (id, state, ...) are handled first;
        // only schedule-specific attributes fall through to the switch below.
        boolean handled = handleCommonAttributes(builder, reader, i);
        if (!handled) {
            switch(attrName) {
                case SCHEDULE_EXPR_SECOND:
                    scheduleExpression.second(attr);
                    break;
                case SCHEDULE_EXPR_MINUTE:
                    scheduleExpression.minute(attr);
                    break;
                case SCHEDULE_EXPR_HOUR:
                    scheduleExpression.hour(attr);
                    break;
                case SCHEDULE_EXPR_DAY_OF_WEEK:
                    scheduleExpression.dayOfWeek(attr);
                    break;
                case SCHEDULE_EXPR_DAY_OF_MONTH:
                    scheduleExpression.dayOfMonth(attr);
                    break;
                case SCHEDULE_EXPR_MONTH:
                    scheduleExpression.month(attr);
                    break;
                case SCHEDULE_EXPR_YEAR:
                    scheduleExpression.year(attr);
                    break;
                case SCHEDULE_EXPR_START_DATE:
                    // Persisted as epoch milliseconds.
                    scheduleExpression.start(new Date(Long.parseLong(attr)));
                    break;
                case SCHEDULE_EXPR_END_DATE:
                    scheduleExpression.end(new Date(Long.parseLong(attr)));
                    break;
                case SCHEDULE_EXPR_TIMEZONE:
                    scheduleExpression.timezone(attr);
                    break;
                default:
                    throw ParseUtils.unexpectedAttribute(reader, i);
            }
        }
    }
    if (!required.isEmpty()) {
        throw ParseUtils.missingRequired(reader, required);
    }
    builder.setScheduleExpression(scheduleExpression);
    while (reader.hasNext()) {
        switch(reader.nextTag()) {
            case END_ELEMENT:
                {
                    // End of the calendar-timer element: everything collected so far is
                    // assembled into the timer here, then we return to the caller.
                    try {
                        if (loadableElements.info != null) {
                            builder.setInfo((Serializable) deserialize(loadableElements.info));
                        }
                        if (loadableElements.methodName != null) {
                            // Auto-timer: resolve the persisted timeout method against the
                            // deployment's class loader.
                            Method timeoutMethod = CalendarTimer.getTimeoutMethod(new TimeoutMethod(loadableElements.className, loadableElements.methodName, loadableElements.params.toArray(new String[loadableElements.params.size()])), classLoader);
                            if (timeoutMethod != null) {
                                builder.setTimeoutMethod(timeoutMethod);
                                timers.add(builder.build(timerService));
                            } else {
                                // Method no longer exists (e.g. redeployed bean changed):
                                // keep a placeholder timer and log the reinstatement failure.
                                builder.setId("deleted-timer");
                                timers.add(builder.build(timerService));
                                EjbLogger.EJB3_TIMER_LOGGER.timerReinstatementFailed(builder.getTimedObjectId(), builder.getId(), null);
                            }
                        } else {
                            timers.add(builder.build(timerService));
                        }
                    } catch (Exception e) {
                        // A broken persisted timer must not abort restoration of the others.
                        EjbLogger.EJB3_TIMER_LOGGER.timerReinstatementFailed(builder.getTimedObjectId(), builder.getId(), e);
                    }
                    return;
                }
            case START_ELEMENT:
                {
                    // Child elements shared by all timer kinds (info, timed object, ...) first.
                    boolean handled = handleCommonElements(reader, loadableElements);
                    if (!handled) {
                        switch(reader.getName().getLocalPart()) {
                            case TIMEOUT_METHOD:
                                {
                                    // Presence of a timeout-method element makes this an auto-timer.
                                    builder.setAutoTimer(true);
                                    parseTimeoutMethod(reader, loadableElements);
                                    break;
                                }
                            default:
                                throw ParseUtils.unexpectedElement(reader);
                        }
                    }
                }
        }
    }
}
302638.222487wildfly
/**
 * Returns (creating and caching if necessary) the CORBA interface-repository ValueBox
 * definition for a Java array class, following the RMI/IIOP mapping: each array dimension is
 * represented as a boxed IDL sequence named {@code seqN_<elementType>}, registered under a
 * module in the {@code org.omg.boxedRMI} hierarchy.
 *
 * @param cls the array class to map (must satisfy {@code cls.isArray()})
 * @return the ValueBox definition for the outermost dimension of {@code cls}
 * @throws RMIIIOPViolationException if the mapping violates the RMI/IIOP rules
 * @throws IRConstructionException if the IR entries cannot be constructed
 */
private ValueBoxDefImpl addArray(Class cls) throws RMIIIOPViolationException, IRConstructionException {
    if (!cls.isArray())
        throw IIOPLogger.ROOT_LOGGER.classIsNotArray(cls.getName());
    ValueBoxDefImpl vbDef;
    // Fast path: this exact array type was already mapped.
    vbDef = (ValueBoxDefImpl) arrayMap.get(cls);
    if (vbDef != null)
        return vbDef;
    // Strip component types until the ultimate (non-array) element type is found,
    // counting the number of dimensions along the way.
    int dimensions = 0;
    Class compType = cls;
    do {
        compType = compType.getComponentType();
        ++dimensions;
    } while (compType.isArray());
    String typeName;
    String moduleName;
    TypeCode typeCode;
    if (compType.isPrimitive()) {
        // Map each Java primitive to its IDL name and primitive TypeCode
        // (note: char -> wchar, int -> long, long -> long_long).
        if (compType == Boolean.TYPE) {
            typeName = "boolean";
            typeCode = orb.get_primitive_tc(TCKind.tk_boolean);
        } else if (compType == Character.TYPE) {
            typeName = "wchar";
            typeCode = orb.get_primitive_tc(TCKind.tk_wchar);
        } else if (compType == Byte.TYPE) {
            typeName = "octet";
            typeCode = orb.get_primitive_tc(TCKind.tk_octet);
        } else if (compType == Short.TYPE) {
            typeName = "short";
            typeCode = orb.get_primitive_tc(TCKind.tk_short);
        } else if (compType == Integer.TYPE) {
            typeName = "long";
            typeCode = orb.get_primitive_tc(TCKind.tk_long);
        } else if (compType == Long.TYPE) {
            typeName = "long_long";
            typeCode = orb.get_primitive_tc(TCKind.tk_longlong);
        } else if (compType == Float.TYPE) {
            typeName = "float";
            typeCode = orb.get_primitive_tc(TCKind.tk_float);
        } else if (compType == Double.TYPE) {
            typeName = "double";
            typeCode = orb.get_primitive_tc(TCKind.tk_double);
        } else {
            throw IIOPLogger.ROOT_LOGGER.unknownPrimitiveType(compType.getName());
        }
        // Primitive sequences live directly under org.omg.boxedRMI.
        moduleName = "org.omg.boxedRMI";
    } else {
        // Non-primitive element: the element's IR definition must already exist in the
        // corresponding map; take its IDL name from there. Special-cased well-known types
        // first, then interfaces, exceptions and value types.
        typeCode = getTypeCode(compType);
        if (compType == String.class)
            typeName = getJavaLangString().name();
        else if (compType == Object.class)
            typeName = getJavaLang_Object().name();
        else if (compType == Class.class)
            typeName = getJavaxRmiCORBAClassDesc().name();
        else if (compType == java.io.Serializable.class)
            typeName = getJavaIoSerializable().name();
        else if (compType == java.io.Externalizable.class)
            typeName = getJavaIoExternalizable().name();
        else if (compType.isInterface() && !RmiIdlUtil.isAbstractValueType(compType))
            typeName = ((InterfaceDefImpl) interfaceMap.get(compType)).name();
        else if (Exception.class.isAssignableFrom(compType))
            typeName = ((ExceptionDefImpl) exceptionMap.get(compType)).name();
        else
            typeName = ((ValueDefImpl) valueMap.get(compType)).name();
        // Mirror the element type's Java package under org.omg.boxedRMI.
        moduleName = "org.omg.boxedRMI." + compType.getPackage().getName();
    }
    ModuleDefImpl m = ensurePackageExists(moduleName);
    // types[0] is the one-dimensional array of the element type, types[dimensions - 1] is
    // cls itself — i.e. the array types ordered from innermost to outermost dimension.
    Class[] types = new Class[dimensions];
    types[dimensions - 1] = cls;
    for (int i = dimensions - 2; i >= 0; --i) types[i] = types[i + 1].getComponentType();
    // Build one boxed sequence per dimension, innermost first; typeCode accumulates:
    // each iteration wraps the previous dimension's (boxed) TypeCode in a sequence.
    for (int i = 0; i < dimensions; ++i) {
        Class type = types[i];
        typeCode = orb.create_sequence_tc(0, typeCode);
        vbDef = (ValueBoxDefImpl) arrayMap.get(type);
        if (vbDef == null) {
            String id = Util.getIRIdentifierOfClass(type);
            SequenceDefImpl sdi = new SequenceDefImpl(typeCode, impl);
            // IDL name encodes the dimension count, e.g. "seq2_long" for long[][].
            String name = "seq" + (i + 1) + "_" + typeName;
            TypeCode boxTypeCode = orb.create_value_box_tc(id, name, typeCode);
            vbDef = new ValueBoxDefImpl(id, name, "1.0", m, boxTypeCode, impl);
            addTypeCode(type, vbDef.type());
            m.add(name, vbDef);
            impl.putSequenceImpl(id, typeCode, sdi, vbDef);
            // Cache so intermediate dimensions are reused by later calls.
            arrayMap.put(type, vbDef);
            typeCode = boxTypeCode;
        } else
            // Dimension already registered: continue from its boxed TypeCode.
            typeCode = vbDef.type();
    }
    return vbDef;
}
301816.542594wildfly
/**
 * Parses a single {@code <persistence-unit>} element into a {@link PersistenceUnitMetadata}.
 * <p>
 * Defaults applied before parsing (overridden by attributes/elements when present): JTA
 * transaction type, AUTO validation mode, UNSPECIFIED shared cache mode and the configured
 * default persistence provider class. Attributes carrying a foreign (non-empty) namespace are
 * ignored rather than rejected.
 *
 * @param reader           reader positioned on the persistence-unit START_ELEMENT
 * @param version          persistence.xml schema version, recorded on the metadata
 * @param propertyReplacer resolves property expressions in element text
 * @return the populated persistence unit metadata
 * @throws XMLStreamException on malformed XML or an unexpected attribute/element
 */
private static PersistenceUnitMetadata parsePU(XMLStreamReader reader, Version version, final PropertyReplacer propertyReplacer) throws XMLStreamException {
    PersistenceUnitMetadata pu = new PersistenceUnitMetadataImpl();
    List<String> classes = new ArrayList<String>(1);
    List<String> jarFiles = new ArrayList<String>(1);
    List<String> mappingFiles = new ArrayList<String>(1);
    Properties properties = new Properties();
    // Defaults; the loops below override them where the descriptor says otherwise.
    pu.setTransactionType(PersistenceUnitTransactionType.JTA);
    pu.setValidationMode(ValidationMode.AUTO);
    pu.setSharedCacheMode(SharedCacheMode.UNSPECIFIED);
    pu.setPersistenceProviderClassName(Configuration.PROVIDER_CLASS_DEFAULT);
    pu.setPersistenceXMLSchemaVersion(version.getVersion());
    final int count = reader.getAttributeCount();
    for (int i = 0; i < count; i++) {
        final String value = reader.getAttributeValue(i);
        if (traceEnabled) {
            ROOT_LOGGER.tracef("parse persistence.xml: attribute value(%d) = %s", i, value);
        }
        final String attributeNamespace = reader.getAttributeNamespace(i);
        // Skip attributes from other namespaces instead of failing on them.
        if (attributeNamespace != null && !attributeNamespace.isEmpty()) {
            continue;
        }
        final Attribute attribute = Attribute.forName(reader.getAttributeLocalName(i));
        switch(attribute) {
            case NAME:
                pu.setPersistenceUnitName(value);
                break;
            case TRANSACTIONTYPE:
                // Any value other than RESOURCE_LOCAL keeps the JTA default.
                if (value.equalsIgnoreCase("RESOURCE_LOCAL"))
                    pu.setTransactionType(PersistenceUnitTransactionType.RESOURCE_LOCAL);
                break;
            default:
                throw unexpectedAttribute(reader, i);
        }
    }
    while (reader.hasNext() && reader.nextTag() != END_ELEMENT) {
        final Element element = Element.forName(reader.getLocalName());
        if (traceEnabled) {
            ROOT_LOGGER.tracef("parse persistence.xml: element=%s", element.getLocalName());
        }
        switch(element) {
            case CLASS:
                classes.add(getElement(reader, propertyReplacer));
                break;
            case DESCRIPTION:
                // Consume the element text; the description is not retained.
                getElement(reader, propertyReplacer);
                break;
            case EXCLUDEUNLISTEDCLASSES:
                String text = getElement(reader, propertyReplacer);
                // An empty <exclude-unlisted-classes/> element means "true".
                if (text == null || text.isEmpty()) {
                    pu.setExcludeUnlistedClasses(true);
                } else {
                    pu.setExcludeUnlistedClasses(Boolean.parseBoolean(text));
                }
                break;
            case JARFILE:
                jarFiles.add(getElement(reader, propertyReplacer));
                break;
            case JTADATASOURCE:
                pu.setJtaDataSourceName(getElement(reader, propertyReplacer));
                break;
            case NONJTADATASOURCE:
                pu.setNonJtaDataSourceName(getElement(reader, propertyReplacer));
                break;
            case MAPPINGFILE:
                mappingFiles.add(getElement(reader, propertyReplacer));
                break;
            case PROPERTIES:
                parseProperties(reader, properties, propertyReplacer);
                break;
            case PROVIDER:
                pu.setPersistenceProviderClassName(getElement(reader, propertyReplacer));
                break;
            case SHAREDCACHEMODE:
                String cm = getElement(reader, propertyReplacer);
                pu.setSharedCacheMode(SharedCacheMode.valueOf(cm));
                break;
            case VALIDATIONMODE:
                String validationMode = getElement(reader, propertyReplacer);
                pu.setValidationMode(ValidationMode.valueOf(validationMode));
                break;
            default:
                throw unexpectedElement(reader);
        }
    }
    if (traceEnabled) {
        ROOT_LOGGER.trace("parse persistence.xml: reached ending persistence-unit tag");
    }
    pu.setManagedClassNames(classes);
    pu.setJarFiles(jarFiles);
    pu.setMappingFiles(mappingFiles);
    pu.setProperties(properties);
    return pu;
}
302217.062595wildfly
/**
 * Walks every attribute description of the resource at {@code address} and sorts the resource's
 * attribute values into the output maps: {@code expressionAttrs} receives values rewritten as
 * {@code ${exp.test:<value>}} expression strings (to be written back to the model),
 * {@code otherAttrs} receives values left as-is, and {@code expectedAttrs} records the value
 * expected to be read back for each handled attribute.
 * <p>
 * NOTE(review): {@code conflicts}, {@code simple}, {@code noSimple} and
 * {@code supportedUndefined} are instance counters incremented here — presumably for summary
 * reporting elsewhere; confirm against the enclosing class. The conflict check below reads
 * {@code expressionAttrs}/{@code otherAttrs} while this same loop populates them, so the
 * outcome for an attribute can depend on attributes handled earlier in the iteration.
 */
private void organizeAttributes(PathAddress address, ModelNode description, ModelNode resource, ModelNode resourceNoDefaults, Map<String, ModelNode> expressionAttrs, Map<String, ModelNode> otherAttrs, Map<String, ModelNode> expectedAttrs) {
    ModelNode attributeDescriptions = description.get(ATTRIBUTES);
    for (Property descProp : attributeDescriptions.asPropertyList()) {
        String attrName = descProp.getName();
        ModelNode attrDesc = descProp.getValue();
        if (isAttributeExcluded(address, attrName, attrDesc, resourceNoDefaults)) {
            continue;
        }
        ModelNode noDefaultValue = resourceNoDefaults.get(attrName);
        if (!noDefaultValue.isDefined()) {
            // Attribute is only defined via its default value. Writing it back would also
            // require everything it REQUIRES, and none of those may clash with an already
            // defined (or already queued) ALTERNATIVE — otherwise skip it as conflicted.
            Set<String> base = new HashSet<String>();
            base.add(attrName);
            if (attrDesc.hasDefined(REQUIRES)) {
                for (ModelNode node : attrDesc.get(REQUIRES).asList()) {
                    base.add(node.asString());
                }
            }
            boolean conflict = false;
            for (String baseAttr : base) {
                if (!resource.hasDefined(baseAttr)) {
                    conflict = true;
                    break;
                }
                ModelNode baseAttrAlts = attributeDescriptions.get(baseAttr, ALTERNATIVES);
                if (baseAttrAlts.isDefined()) {
                    for (ModelNode alt : baseAttrAlts.asList()) {
                        String altName = alt.asString();
                        if (resourceNoDefaults.hasDefined(alt.asString()) || expressionAttrs.containsKey(altName) || otherAttrs.containsKey(altName)) {
                            conflict = true;
                            break;
                        }
                    }
                }
            }
            if (conflict) {
                conflicts++;
                logHandling("Skipping conflicted attribute " + attrName + " at " + address.toModelNode().asString());
                continue;
            }
        }
        ModelNode attrValue = resource.get(attrName);
        ModelType attrType = attrValue.getType();
        if (attrDesc.get(EXPRESSIONS_ALLOWED).asBoolean(false)) {
            if (attrType != ModelType.UNDEFINED && attrType != ModelType.EXPRESSION) {
                if (COMPLEX_TYPES.contains(attrType)) {
                    // Collections: a simple VALUE_TYPE (ModelType.TYPE) means elements are
                    // scalars; a defined complex VALUE_TYPE describes nested attributes.
                    ModelNode valueType = attrDesc.get(VALUE_TYPE);
                    if (valueType.getType() == ModelType.TYPE) {
                        handleSimpleCollection(address, attrName, attrValue, valueType.asType(), expressionAttrs, otherAttrs, expectedAttrs);
                    } else if (valueType.isDefined()) {
                        handleComplexCollection(address, attrName, attrValue, attrType, valueType, expressionAttrs, otherAttrs, expectedAttrs);
                    } else {
                        noSimple++;
                        logNoExpressions(address, attrName);
                        otherAttrs.put(attrName, attrValue);
                        expectedAttrs.put(attrName, attrValue);
                    }
                } else {
                    // Simple scalar: wrap the current value in an expression whose default
                    // resolves back to that same value.
                    if (attrType == ModelType.STRING) {
                        checkForUnconvertedExpression(address, attrName, attrValue);
                    }
                    String expression = "${exp.test:" + attrValue.asString() + "}";
                    expressionAttrs.put(attrName, new ModelNode(expression));
                    expectedAttrs.put(attrName, new ModelNode().set(new ValueExpression(expression)));
                    simple++;
                    logHandling("Added expression to simple attribute " + attrName + " at " + address.toModelNode().asString());
                }
            } else {
                // Undefined value, or already an expression: pass through unchanged.
                if (attrType != ModelType.EXPRESSION) {
                    supportedUndefined++;
                    logHandling("Expression supported but value undefined on simple attribute " + attrName + " at " + address.toModelNode().asString());
                } else {
                    simple++;
                    logHandling("Already found an expression on simple attribute " + attrName + " at " + address.toModelNode().asString());
                }
                otherAttrs.put(attrName, attrValue);
                expectedAttrs.put(attrName, attrValue);
            }
        } else if (COMPLEX_TYPES.contains(attrType) && attrDesc.get(VALUE_TYPE).getType() != ModelType.TYPE && attrDesc.get(VALUE_TYPE).isDefined()) {
            // Expressions not allowed on the attribute itself, but nested fields of a
            // complex collection may still support them.
            handleComplexCollection(address, attrName, attrValue, attrType, attrDesc.get(VALUE_TYPE), expressionAttrs, otherAttrs, expectedAttrs);
        } else {
            noSimple++;
            logNoExpressions(address, attrName);
            otherAttrs.put(attrName, attrValue);
            expectedAttrs.put(attrName, attrValue);
        }
    }
}
303544.28103wildfly
/**
 * Verifies web session passivation/activation behavior with two concurrent clients sharing one
 * deployment: creating a second session should eventually passivate the first (the container
 * presumably enforces a max-active-sessions limit of one — confirm against the deployment
 * configuration), and accessing a passivated session should activate it again, passivating the
 * other. Events are collected from response headers via {@code collectEvents} and checked with
 * {@code validateEvents}. Each passivation is awaited by polling up to
 * {@code MAX_PASSIVATION_DURATION}, sleeping {@code PASSIVATION_WAIT_DURATION} between polls.
 * <p>
 * NOTE(review): an {@link InterruptedException} re-interrupts the thread but then falls through
 * to the finally block, so an interrupted run skips the remaining assertions silently.
 */
public void test(@ArquillianResource(SessionOperationServlet.class) @OperateOnDeployment(DEPLOYMENT_1) URL baseURL) throws IOException, URISyntaxException {
    try (CloseableHttpClient client1 = HttpClients.createDefault()) {
        try (CloseableHttpClient client2 = HttpClients.createDefault()) {
            try {
                // Client 1 creates session1 with a=1.
                String session1 = null;
                try (CloseableHttpResponse response = client1.execute(new HttpPut(SessionOperationServlet.createURI(baseURL, "a", "1")))) {
                    assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                    assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                    session1 = response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue();
                }
                Map<String, Queue<SessionOperationServlet.EventType>> events = new HashMap<>();
                Map<String, SessionOperationServlet.EventType> expectedEvents = new HashMap<>();
                events.put(session1, new LinkedList<>());
                expectedEvents.put(session1, SessionOperationServlet.EventType.PASSIVATION);
                // Allow the previous request's session changes to be committed.
                Thread.sleep(COMMIT_DURATION.toMillis());
                Instant start = Instant.now();
                // Client 2 creates session2 with a=2; this should trigger passivation of session1.
                String session2 = null;
                try (CloseableHttpResponse response = client2.execute(new HttpPut(SessionOperationServlet.createURI(baseURL, "a", "2")))) {
                    assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                    assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                    session2 = response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue();
                    events.put(session2, new LinkedList<>());
                    expectedEvents.put(session2, SessionOperationServlet.EventType.PASSIVATION);
                    collectEvents(response, events);
                }
                // Poll with client 2 until session1's passivation event shows up (or timeout).
                while (events.get(session1).isEmpty() && Duration.between(start, Instant.now()).compareTo(MAX_PASSIVATION_DURATION) < 0) {
                    try (CloseableHttpResponse response = client2.execute(new HttpGet(SessionOperationServlet.createURI(baseURL, "a")))) {
                        assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                        assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                        assertEquals(session2, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
                        assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
                        assertEquals("2", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
                        collectEvents(response, events);
                    }
                    Thread.sleep(PASSIVATION_WAIT_DURATION.toMillis());
                }
                assertFalse(events.get(session1).isEmpty());
                validateEvents(session1, events, expectedEvents);
                Thread.sleep(COMMIT_DURATION.toMillis());
                start = Instant.now();
                // Client 1 reads its session again: session1 must activate, keeping its value.
                try (CloseableHttpResponse response = client1.execute(new HttpGet(SessionOperationServlet.createURI(baseURL, "a")))) {
                    assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                    assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                    assertEquals(session1, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
                    assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
                    assertEquals("1", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
                    collectEvents(response, events);
                    assertFalse(events.get(session1).isEmpty());
                    assertTrue(events.get(session1).contains(SessionOperationServlet.EventType.ACTIVATION));
                }
                // Activating session1 should in turn passivate session2; poll for that event.
                while (events.get(session2).isEmpty() && Duration.between(start, Instant.now()).compareTo(MAX_PASSIVATION_DURATION) < 0) {
                    try (CloseableHttpResponse response = client1.execute(new HttpGet(SessionOperationServlet.createURI(baseURL, "a")))) {
                        assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                        assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                        assertEquals(session1, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
                        assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
                        assertEquals("1", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
                        collectEvents(response, events);
                    }
                    Thread.sleep(PASSIVATION_WAIT_DURATION.toMillis());
                }
                assertFalse(events.get(session2).isEmpty());
                validateEvents(session2, events, expectedEvents);
                Thread.sleep(COMMIT_DURATION.toMillis());
                start = Instant.now();
                // Client 2 reads again: session2 must activate, keeping its value.
                try (CloseableHttpResponse response = client2.execute(new HttpGet(SessionOperationServlet.createURI(baseURL, "a")))) {
                    assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                    assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                    assertEquals(session2, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
                    assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
                    assertEquals("2", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
                    collectEvents(response, events);
                    assertFalse(events.get(session2).isEmpty());
                    assertTrue(events.get(session2).contains(SessionOperationServlet.EventType.ACTIVATION));
                }
                // session1's queue was drained by validateEvents; poll until it gains a new
                // (second) passivation event caused by session2's activation, or timeout.
                while (!events.get(session1).isEmpty() && Duration.between(start, Instant.now()).compareTo(MAX_PASSIVATION_DURATION) < 0) {
                    try (CloseableHttpResponse response = client2.execute(new HttpGet(SessionOperationServlet.createURI(baseURL, "a")))) {
                        assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                        assertTrue(response.containsHeader(SessionOperationServlet.SESSION_ID));
                        assertEquals(session2, response.getFirstHeader(SessionOperationServlet.SESSION_ID).getValue());
                        assertTrue(response.containsHeader(SessionOperationServlet.RESULT));
                        assertEquals("2", response.getFirstHeader(SessionOperationServlet.RESULT).getValue());
                        collectEvents(response, events);
                    }
                    Thread.sleep(PASSIVATION_WAIT_DURATION.toMillis());
                }
                assertFalse(events.get(session1).isEmpty());
                validateEvents(session1, events, expectedEvents);
                validateEvents(session2, events, expectedEvents);
            } catch (InterruptedException e) {
                // Restore the interrupt flag; cleanup below still runs.
                Thread.currentThread().interrupt();
            } finally {
                // Always invalidate both sessions so later tests start clean.
                try (CloseableHttpResponse response = client1.execute(new HttpDelete(SessionOperationServlet.createURI(baseURL)))) {
                    assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                }
                try (CloseableHttpResponse response = client2.execute(new HttpDelete(SessionOperationServlet.createURI(baseURL)))) {
                    assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
                }
            }
        }
    }
}
302217.062595wildfly
/**
 * Walks every attribute description of the resource at {@code address} and sorts the resource's
 * attribute values into the output maps: {@code expressionAttrs} receives values rewritten as
 * {@code ${exp.test:<value>}} expression strings (to be written back to the model),
 * {@code otherAttrs} receives values left as-is, and {@code expectedAttrs} records the value
 * expected to be read back for each handled attribute.
 * <p>
 * NOTE(review): {@code conflicts}, {@code simple}, {@code noSimple} and
 * {@code supportedUndefined} are instance counters incremented here — presumably for summary
 * reporting elsewhere; confirm against the enclosing class. The conflict check below reads
 * {@code expressionAttrs}/{@code otherAttrs} while this same loop populates them, so the
 * outcome for an attribute can depend on attributes handled earlier in the iteration.
 */
private void organizeAttributes(PathAddress address, ModelNode description, ModelNode resource, ModelNode resourceNoDefaults, Map<String, ModelNode> expressionAttrs, Map<String, ModelNode> otherAttrs, Map<String, ModelNode> expectedAttrs) {
    ModelNode attributeDescriptions = description.get(ATTRIBUTES);
    for (Property descProp : attributeDescriptions.asPropertyList()) {
        String attrName = descProp.getName();
        ModelNode attrDesc = descProp.getValue();
        if (isAttributeExcluded(address, attrName, attrDesc, resourceNoDefaults)) {
            continue;
        }
        ModelNode noDefaultValue = resourceNoDefaults.get(attrName);
        if (!noDefaultValue.isDefined()) {
            // Attribute is only defined via its default value. Writing it back would also
            // require everything it REQUIRES, and none of those may clash with an already
            // defined (or already queued) ALTERNATIVE — otherwise skip it as conflicted.
            Set<String> base = new HashSet<String>();
            base.add(attrName);
            if (attrDesc.hasDefined(REQUIRES)) {
                for (ModelNode node : attrDesc.get(REQUIRES).asList()) {
                    base.add(node.asString());
                }
            }
            boolean conflict = false;
            for (String baseAttr : base) {
                if (!resource.hasDefined(baseAttr)) {
                    conflict = true;
                    break;
                }
                ModelNode baseAttrAlts = attributeDescriptions.get(baseAttr, ALTERNATIVES);
                if (baseAttrAlts.isDefined()) {
                    for (ModelNode alt : baseAttrAlts.asList()) {
                        String altName = alt.asString();
                        if (resourceNoDefaults.hasDefined(alt.asString()) || expressionAttrs.containsKey(altName) || otherAttrs.containsKey(altName)) {
                            conflict = true;
                            break;
                        }
                    }
                }
            }
            if (conflict) {
                conflicts++;
                logHandling("Skipping conflicted attribute " + attrName + " at " + address.toModelNode().asString());
                continue;
            }
        }
        ModelNode attrValue = resource.get(attrName);
        ModelType attrType = attrValue.getType();
        if (attrDesc.get(EXPRESSIONS_ALLOWED).asBoolean(false)) {
            if (attrType != ModelType.UNDEFINED && attrType != ModelType.EXPRESSION) {
                if (COMPLEX_TYPES.contains(attrType)) {
                    // Collections: a simple VALUE_TYPE (ModelType.TYPE) means elements are
                    // scalars; a defined complex VALUE_TYPE describes nested attributes.
                    ModelNode valueType = attrDesc.get(VALUE_TYPE);
                    if (valueType.getType() == ModelType.TYPE) {
                        handleSimpleCollection(address, attrName, attrValue, valueType.asType(), expressionAttrs, otherAttrs, expectedAttrs);
                    } else if (valueType.isDefined()) {
                        handleComplexCollection(address, attrName, attrValue, attrType, valueType, expressionAttrs, otherAttrs, expectedAttrs);
                    } else {
                        noSimple++;
                        logNoExpressions(address, attrName);
                        otherAttrs.put(attrName, attrValue);
                        expectedAttrs.put(attrName, attrValue);
                    }
                } else {
                    // Simple scalar: wrap the current value in an expression whose default
                    // resolves back to that same value.
                    if (attrType == ModelType.STRING) {
                        checkForUnconvertedExpression(address, attrName, attrValue);
                    }
                    String expression = "${exp.test:" + attrValue.asString() + "}";
                    expressionAttrs.put(attrName, new ModelNode(expression));
                    expectedAttrs.put(attrName, new ModelNode().set(new ValueExpression(expression)));
                    simple++;
                    logHandling("Added expression to simple attribute " + attrName + " at " + address.toModelNode().asString());
                }
            } else {
                // Undefined value, or already an expression: pass through unchanged.
                if (attrType != ModelType.EXPRESSION) {
                    supportedUndefined++;
                    logHandling("Expression supported but value undefined on simple attribute " + attrName + " at " + address.toModelNode().asString());
                } else {
                    simple++;
                    logHandling("Already found an expression on simple attribute " + attrName + " at " + address.toModelNode().asString());
                }
                otherAttrs.put(attrName, attrValue);
                expectedAttrs.put(attrName, attrValue);
            }
        } else if (COMPLEX_TYPES.contains(attrType) && attrDesc.get(VALUE_TYPE).getType() != ModelType.TYPE && attrDesc.get(VALUE_TYPE).isDefined()) {
            // Expressions not allowed on the attribute itself, but nested fields of a
            // complex collection may still support them.
            handleComplexCollection(address, attrName, attrValue, attrType, attrDesc.get(VALUE_TYPE), expressionAttrs, otherAttrs, expectedAttrs);
        } else {
            noSimple++;
            logNoExpressions(address, attrName);
            otherAttrs.put(attrName, attrValue);
            expectedAttrs.put(attrName, attrValue);
        }
    }
}
302195.232982wildfly
/**
 * Discovers all tag library descriptors (TLDs) in a WAR deployment and attaches the
 * aggregated {@link TldsMetaData} to the deployment unit.
 * <p>
 * TLDs are resolved from three places: locations explicitly declared by jsp-config
 * taglib entries, META-INF of bundled jars, and loose files under WEB-INF. Finally,
 * listeners declared by the discovered TLDs (plus the container's shared TLDs) are
 * merged into the deployment's listener list.
 *
 * @param phaseContext the current deployment phase context
 * @throws DeploymentUnitProcessingException if TLD processing fails
 */
public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
    final DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
    // TLDs are only relevant for web archives.
    if (!DeploymentTypeMarker.isType(DeploymentType.WAR, deploymentUnit)) {
        return;
    }
    final WarMetaData warMetaData = deploymentUnit.getAttachment(WarMetaData.ATTACHMENT_KEY);
    if (warMetaData == null || warMetaData.getMergedJBossWebMetaData() == null) {
        return;
    }
    TldsMetaData tldsMetaData = deploymentUnit.getAttachment(TldsMetaData.ATTACHMENT_KEY);
    if (tldsMetaData == null) {
        tldsMetaData = new TldsMetaData();
        deploymentUnit.putAttachment(TldsMetaData.ATTACHMENT_KEY, tldsMetaData);
    }
    // uri -> descriptor map; uniqueTlds records each descriptor exactly once even
    // when several uris resolve to the same TLD.
    Map<String, TldMetaData> tlds = new HashMap<>();
    tldsMetaData.setTlds(tlds);
    final List<TldMetaData> uniqueTlds = new ArrayList<>();
    final VirtualFile deploymentRoot = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_ROOT).getRoot();
    // Fetch the resource roots once and reuse the list below for the jar scan
    // (previously the attachment list was retrieved twice).
    final List<ResourceRoot> resourceRoots = deploymentUnit.getAttachmentList(Attachments.RESOURCE_ROOTS);
    // Candidate roots against which explicitly declared taglib locations are resolved.
    final List<VirtualFile> testRoots = new ArrayList<>();
    testRoots.add(deploymentRoot);
    testRoots.add(deploymentRoot.getChild(WEB_INF));
    testRoots.add(deploymentRoot.getChild(META_INF));
    for (ResourceRoot root : resourceRoots) {
        testRoots.add(root.getRoot());
        testRoots.add(root.getRoot().getChild(META_INF));
        testRoots.add(root.getRoot().getChild(META_INF).getChild(RESOURCES));
    }
    JspConfigMetaData merged = warMetaData.getMergedJBossWebMetaData().getJspConfig();
    if (merged != null && merged.getTaglibs() != null) {
        // Resolve each explicitly declared taglib against the candidate roots.
        for (final TaglibMetaData tld : merged.getTaglibs()) {
            boolean found = false;
            for (final VirtualFile root : testRoots) {
                VirtualFile child = root.getChild(tld.getTaglibLocation());
                if (child.exists()) {
                    if (isTldFile(child)) {
                        TldMetaData value = processTld(deploymentRoot, child, tlds, uniqueTlds);
                        // An earlier mapping for the same uri wins.
                        if (!tlds.containsKey(tld.getTaglibUri())) {
                            tlds.put(tld.getTaglibUri(), value);
                        }
                    }
                    found = true;
                    break;
                }
            }
            if (!found) {
                UndertowLogger.ROOT_LOGGER.tldNotFound(tld.getTaglibLocation());
            }
        }
    }
    // Scan META-INF of every bundled jar for TLDs.
    for (ResourceRoot resourceRoot : resourceRoots) {
        if (resourceRoot.getRoot().getName().toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
            VirtualFile webFragment = resourceRoot.getRoot().getChild(META_INF);
            if (webFragment.exists() && webFragment.isDirectory()) {
                processTlds(deploymentRoot, webFragment.getChildren(), tlds, uniqueTlds);
            }
        }
    }
    // Scan WEB-INF (excluding classes/ and lib/) for loose TLD files.
    VirtualFile webInf = deploymentRoot.getChild(WEB_INF);
    if (webInf.exists() && webInf.isDirectory()) {
        for (VirtualFile file : webInf.getChildren()) {
            if (isTldFile(file)) {
                processTld(deploymentRoot, file, tlds, uniqueTlds);
            } else if (file.isDirectory() && !CLASSES.equals(file.getName()) && !LIB.equals(file.getName())) {
                processTlds(deploymentRoot, file.getChildren(), tlds, uniqueTlds);
            }
        }
    }
    // Merge listeners contributed by discovered and shared TLDs into the web metadata.
    JBossWebMetaData mergedMd = warMetaData.getMergedJBossWebMetaData();
    if (mergedMd.getListeners() == null) {
        mergedMd.setListeners(new ArrayList<ListenerMetaData>());
    }
    final List<TldMetaData> allTlds = new ArrayList<>(uniqueTlds);
    allTlds.addAll(tldsMetaData.getSharedTlds(deploymentUnit));
    for (final TldMetaData tld : allTlds) {
        if (tld.getListeners() != null) {
            mergedMd.getListeners().addAll(tld.getListeners());
        }
    }
}
311836.722292cassandra
/**
 * Rebuilds this node's data by streaming ranges from other nodes in the cluster.
 * Only one rebuild may run at a time; concurrent invocations fail fast.
 *
 * @param sourceDc                     restrict streaming sources to this datacenter, or {@code null} for any DC
 * @param keyspace                     keyspace to rebuild, or {@code null} for all non-local-strategy keyspaces
 * @param tokens                       token ranges to rebuild (requires {@code keyspace}), or {@code null} for all
 * @param specificSources              comma-separated host list to restrict sources (only used with {@code tokens})
 * @param excludeLocalDatacenterNodes  if true, never stream from nodes in the local DC
 * @throws IllegalStateException    if a rebuild is already in progress
 * @throws IllegalArgumentException on invalid DC, host, or keyspace/token combinations
 */
public static void rebuild(String sourceDc, String keyspace, String tokens, String specificSources, boolean excludeLocalDatacenterNodes) {
    // CAS guard: only one rebuild at a time; reset in the finally block below.
    if (!isRebuilding.compareAndSet(false, true)) {
        throw new IllegalStateException("Node is still rebuilding. Check nodetool netstats.");
    }
    if (sourceDc != null) {
        if (sourceDc.equals(DatabaseDescriptor.getLocalDataCenter()) && excludeLocalDatacenterNodes)
            throw new IllegalArgumentException("Cannot set source data center to be local data center, when excludeLocalDataCenter flag is set");
        Set<String> availableDCs = ClusterMetadata.current().directory.knownDatacenters();
        if (!availableDCs.contains(sourceDc)) {
            throw new IllegalArgumentException(String.format("Provided datacenter '%s' is not a valid datacenter, available datacenters are: %s", sourceDc, String.join(",", availableDCs)));
        }
    }
    try {
        if (keyspace == null && tokens != null) {
            throw new IllegalArgumentException("Cannot specify tokens without keyspace.");
        }
        logger.info("rebuild from dc: {}, {}, {}", sourceDc == null ? "(any dc)" : sourceDc, keyspace == null ? "(All keyspaces)" : keyspace, tokens == null ? "(All tokens)" : tokens);
        StorageService.instance.repairPaxosForTopologyChange("rebuild");
        ClusterMetadata metadata = ClusterMetadata.current();
        MovementMap rebuildMovements = movementMap(metadata, keyspace, tokens);
        logger.info("Rebuild movements: {}", rebuildMovements);
        RangeStreamer streamer = new RangeStreamer(metadata, StreamOperation.REBUILD, false, DatabaseDescriptor.getEndpointSnitch(), StorageService.instance.streamStateStore, false, DatabaseDescriptor.getStreamingConnectionsPerHost(), rebuildMovements, null);
        if (sourceDc != null)
            streamer.addSourceFilter(new RangeStreamer.SingleDatacenterFilter(DatabaseDescriptor.getEndpointSnitch(), sourceDc));
        if (excludeLocalDatacenterNodes)
            streamer.addSourceFilter(new RangeStreamer.ExcludeLocalDatacenterFilter(DatabaseDescriptor.getEndpointSnitch()));
        if (keyspace == null) {
            // No keyspace given: rebuild everything with a non-local replication strategy.
            for (String keyspaceName : Schema.instance.getNonLocalStrategyKeyspaces().names()) streamer.addKeyspaceToFetch(keyspaceName);
        } else if (tokens == null) {
            streamer.addKeyspaceToFetch(keyspace);
        } else {
            // Token-scoped rebuild; optionally restrict to an explicit set of source hosts.
            if (specificSources != null) {
                String[] stringHosts = specificSources.split(",");
                Set<InetAddressAndPort> sources = new HashSet<>(stringHosts.length);
                for (String stringHost : stringHosts) {
                    try {
                        InetAddressAndPort endpoint = InetAddressAndPort.getByName(stringHost);
                        if (getBroadcastAddressAndPort().equals(endpoint)) {
                            throw new IllegalArgumentException("This host was specified as a source for rebuilding. Sources for a rebuild can only be other nodes in the cluster.");
                        }
                        sources.add(endpoint);
                    } catch (UnknownHostException ex) {
                        throw new IllegalArgumentException("Unknown host specified " + stringHost, ex);
                    }
                }
                streamer.addSourceFilter(new RangeStreamer.AllowedSourcesFilter(sources));
            }
            streamer.addKeyspaceToFetch(keyspace);
        }
        streamer.fetchAsync().get();
    } catch (InterruptedException e) {
        throw new UncheckedInterruptedException(e);
    } catch (ExecutionException e) {
        logger.error("Error while rebuilding node", e.getCause());
        // Preserve the underlying cause in the rethrown exception instead of
        // discarding it (previously only the message survived).
        throw new RuntimeException("Error while rebuilding node: " + e.getCause().getMessage(), e.getCause());
    } finally {
        // Always release the rebuild lock, success or failure.
        isRebuilding.set(false);
    }
}
322054.242478cassandra
/**
 * Loads the on-disk components of an SSTable into the reader builder: stats/header
 * metadata, key range, bloom filter, index summary, data file handle and primary
 * index handle. Missing filter/summary components are rebuilt from the primary
 * index when possible. On any failure, every resource already attached to the
 * builder is closed before the exception propagates.
 *
 * @param builder  receives the loaded components (mutated throughout)
 * @param owner    owning entity, used to report metrics during rebuild; may be null
 * @param validate NOTE(review): parameter is unused in this body — confirm intent
 * @param online   true when opening on a live node (enables key cache, stricter
 *                 header checks, and persisting rebuilt filter/summary to disk)
 * @throws IOException on component read/write failure
 */
protected void openComponents(BigTableReader.Builder builder, SSTable.Owner owner, boolean validate, boolean online) throws IOException {
    try {
        // Key cache only applies to online opens when the table's caching params allow key caching.
        if (online && builder.getTableMetadataRef().getLocal().params.caching.cacheKeys())
            builder.setKeyCache(new KeyCache(CacheService.instance.keyCache));
        StatsComponent statsComponent = StatsComponent.load(descriptor, MetadataType.STATS, MetadataType.HEADER, MetadataType.VALIDATION);
        builder.setSerializationHeader(statsComponent.serializationHeader(builder.getTableMetadataRef().getLocal()));
        // A live node must have a serialization header; offline tools may tolerate its absence.
        checkArgument(!online || builder.getSerializationHeader() != null);
        builder.setStatsMetadata(statsComponent.statsMetadata());
        // Newer format versions persist the key range directly in the stats metadata.
        if (descriptor.version.hasKeyRange() && statsComponent.statsMetadata() != null) {
            builder.setFirst(tableMetadataRef.getLocal().partitioner.decorateKey(statsComponent.statsMetadata().firstKey));
            builder.setLast(tableMetadataRef.getLocal().partitioner.decorateKey(statsComponent.statsMetadata().lastKey));
        }
        ValidationMetadata validationMetadata = statsComponent.validationMetadata();
        // Fail fast if the sstable was written with a different partitioner.
        validatePartitioner(builder.getTableMetadataRef().getLocal(), validationMetadata);
        boolean filterNeeded = online;
        if (filterNeeded)
            builder.setFilter(loadFilter(validationMetadata));
        // Rebuild the bloom filter if it was needed but could not be loaded from disk.
        boolean rebuildFilter = filterNeeded && builder.getFilter() == null;
        boolean summaryNeeded = true;
        if (summaryNeeded) {
            IndexSummaryComponent summaryComponent = loadSummary();
            if (summaryComponent != null) {
                // Fall back to the summary's key range when stats metadata did not provide one.
                if (builder.getFirst() == null || builder.getLast() == null) {
                    builder.setFirst(summaryComponent.first);
                    builder.setLast(summaryComponent.last);
                }
                builder.setIndexSummary(summaryComponent.indexSummary);
            }
        }
        boolean rebuildSummary = summaryNeeded && builder.getIndexSummary() == null;
        // Regenerate missing filter and/or summary by scanning the primary index.
        if (builder.getComponents().contains(Components.PRIMARY_INDEX) && (rebuildFilter || rebuildSummary)) {
            try (FileHandle indexFile = indexFileBuilder(builder.getIndexSummary()).complete()) {
                Pair<IFilter, IndexSummaryComponent> filterAndSummary = buildSummaryAndBloomFilter(indexFile, builder.getSerializationHeader(), rebuildFilter, rebuildSummary, owner != null ? owner.getMetrics() : null);
                IFilter filter = filterAndSummary.left;
                IndexSummaryComponent summaryComponent = filterAndSummary.right;
                if (summaryComponent != null) {
                    builder.setFirst(summaryComponent.first);
                    builder.setLast(summaryComponent.last);
                    builder.setIndexSummary(summaryComponent.indexSummary);
                    // Persist rebuilt components only on a live node.
                    if (online)
                        summaryComponent.save(descriptor.fileFor(Components.SUMMARY), false);
                }
                if (filter != null) {
                    builder.setFilter(filter);
                    if (online)
                        FilterComponent.save(filter, descriptor, false);
                }
            }
        }
        // Open the data file, wiring in compression metadata (if any) and the live CRC check chance.
        try (CompressionMetadata compressionMetadata = CompressionInfoComponent.maybeLoad(descriptor, components)) {
            builder.setDataFile(dataFileBuilder(builder.getStatsMetadata()).withCompressionMetadata(compressionMetadata).withCrcCheckChance(() -> tableMetadataRef.getLocal().params.crcCheckChance).complete());
        }
        // No filter loaded or rebuilt: use the pass-through filter (every key "may be present").
        if (builder.getFilter() == null)
            builder.setFilter(FilterFactory.AlwaysPresent);
        if (builder.getComponents().contains(Components.PRIMARY_INDEX))
            builder.setIndexFile(indexFileBuilder(builder.getIndexSummary()).complete());
    } catch (IOException | RuntimeException | Error ex) {
        // Close everything attached so far, suppressing secondary failures on ex.
        Throwables.closeNonNullAndAddSuppressed(ex, builder.getDataFile(), builder.getIndexFile(), builder.getFilter(), builder.getIndexSummary());
        throw ex;
    }
}
331054.663968cassandra
 /**
  * Skips over a single CQL value starting at {@code idx} in {@code toParse} and
  * returns the index just past it. Handles single-quoted string literals (with
  * doubled-quote escapes) and nested {@code {}}, {@code []} and {@code ()} groups.
  *
  * @param toParse the text being scanned
  * @param idx     the position of the value's first character (must not be blank)
  * @return the index of the first character after the value, or the index of an
  *         unbalanced closing delimiter belonging to an enclosing construct
  * @throws IllegalArgumentException if {@code idx} is out of range, points at a
  *         blank character, or the value is unterminated at end of input
  */
 static int skipCQLValue(String toParse, int idx) {
    // Reject out-of-range or blank starting positions up front.
    if (idx >= toParse.length())
        throw new IllegalArgumentException();
    if (isBlank(toParse.charAt(idx)))
        throw new IllegalArgumentException();
    int curlyDepth = 0;
    int squareDepth = 0;
    int parenDepth = 0;
    boolean insideQuotes = false;
    do {
        final char ch = toParse.charAt(idx);
        if (insideQuotes) {
            if (ch == '\'') {
                // A doubled quote is an escaped quote inside the literal.
                if (idx + 1 < toParse.length() && toParse.charAt(idx + 1) == '\'') {
                    ++idx;
                } else {
                    insideQuotes = false;
                    // A bare string literal ends the value when not nested.
                    if (curlyDepth == 0 && squareDepth == 0 && parenDepth == 0)
                        return idx + 1;
                }
            }
        } else {
            switch (ch) {
                case '\'':
                    insideQuotes = true;
                    break;
                case '{':
                    ++curlyDepth;
                    break;
                case '[':
                    ++squareDepth;
                    break;
                case '(':
                    ++parenDepth;
                    break;
                case '}':
                    // Closer with no matching opener: it belongs to the caller's context.
                    if (curlyDepth == 0)
                        return idx;
                    --curlyDepth;
                    if (curlyDepth == 0 && squareDepth == 0 && parenDepth == 0)
                        return idx + 1;
                    break;
                case ']':
                    if (squareDepth == 0)
                        return idx;
                    --squareDepth;
                    if (curlyDepth == 0 && squareDepth == 0 && parenDepth == 0)
                        return idx + 1;
                    break;
                case ')':
                    if (parenDepth == 0)
                        return idx;
                    --parenDepth;
                    if (curlyDepth == 0 && squareDepth == 0 && parenDepth == 0)
                        return idx + 1;
                    break;
                default:
                    // Any non-identifier character terminates an unnested bare value.
                    if (isBlank(ch) || !isIdentifierChar(ch)) {
                        if (curlyDepth == 0 && squareDepth == 0 && parenDepth == 0)
                            return idx;
                    }
                    break;
            }
        }
    } while (++idx < toParse.length());
    // Reaching end of input inside a string or an open group means the value is malformed.
    if (insideQuotes || curlyDepth != 0 || squareDepth != 0 || parenDepth != 0)
        throw new IllegalArgumentException();
    return idx;
}
341678.22265cassandra
/**
 * Picks the sstables to compact for the given LCS level.
 * <p>
 * For L0, gathers overlapping age-sorted candidates that do not conflict with
 * anything already compacting (including the key range covered by compacting L0
 * sstables), capping the set at the maximum compaction threshold, and pulls in
 * overlapping L1 sstables when the candidate set exceeds the max sstable size.
 * For L1+, walks the level round-robin from the last compacted sstable and
 * returns the first sstable (plus its next-level overlaps) that is neither
 * suspect nor already compacting.
 *
 * @param level the level to pick candidates from; generations at this level must be non-empty
 * @return the chosen candidates, or an empty list if no safe compaction exists
 */
private Collection<SSTableReader> getCandidatesFor(int level) {
    assert !generations.get(level).isEmpty();
    logger.trace("Choosing candidates for L{}", level);
    final Set<SSTableReader> compacting = cfs.getTracker().getCompacting();
    if (level == 0) {
        Set<SSTableReader> compactingL0 = getCompactingL0();
        // Compute the overall key range currently being compacted in L0; new candidates
        // must not overlap it.
        PartitionPosition lastCompactingKey = null;
        PartitionPosition firstCompactingKey = null;
        for (SSTableReader candidate : compactingL0) {
            if (firstCompactingKey == null || candidate.getFirst().compareTo(firstCompactingKey) < 0)
                firstCompactingKey = candidate.getFirst();
            if (lastCompactingKey == null || candidate.getLast().compareTo(lastCompactingKey) > 0)
                lastCompactingKey = candidate.getLast();
        }
        Set<SSTableReader> candidates = new HashSet<>();
        // Consider only non-suspect L0 sstables; 'remaining' shrinks as overlap groups are claimed.
        Map<SSTableReader, Bounds<Token>> remaining = genBounds(Iterables.filter(generations.get(0), Predicates.not(SSTableReader::isMarkedSuspect)));
        for (SSTableReader sstable : ageSortedSSTables(remaining.keySet())) {
            if (candidates.contains(sstable))
                continue;
            // The sstable together with everything it overlaps must compact as a unit.
            Sets.SetView<SSTableReader> overlappedL0 = Sets.union(Collections.singleton(sstable), overlappingWithBounds(sstable, remaining));
            // Skip the whole group if any member is already compacting.
            if (!Sets.intersection(overlappedL0, compactingL0).isEmpty())
                continue;
            for (SSTableReader newCandidate : overlappedL0) {
                // Only accept sstables that avoid the currently-compacting L0 key range.
                if (firstCompactingKey == null || lastCompactingKey == null || overlapping(firstCompactingKey.getToken(), lastCompactingKey.getToken(), Collections.singleton(newCandidate)).size() == 0)
                    candidates.add(newCandidate);
                remaining.remove(newCandidate);
            }
            // Cap the compaction at the configured threshold, keeping the oldest sstables.
            if (candidates.size() > cfs.getMaximumCompactionThreshold()) {
                candidates = new HashSet<>(ageSortedSSTables(candidates).subList(0, cfs.getMaximumCompactionThreshold()));
                break;
            }
        }
        // Large L0 compactions must also merge with overlapping L1 sstables, which in
        // turn must not be busy.
        if (SSTableReader.getTotalBytes(candidates) > maxSSTableSizeInBytes) {
            Set<SSTableReader> l1overlapping = overlapping(candidates, generations.get(1));
            if (Sets.intersection(l1overlapping, compacting).size() > 0)
                return Collections.emptyList();
            if (!overlapping(candidates, compactingL0).isEmpty())
                return Collections.emptyList();
            candidates = Sets.union(candidates, l1overlapping);
        }
        // A single sstable is not worth compacting at L0.
        if (candidates.size() < 2)
            return Collections.emptyList();
        else
            return candidates;
    }
    // L1+: round-robin over the level starting after the last compacted sstable.
    Map<SSTableReader, Bounds<Token>> sstablesNextLevel = genBounds(generations.get(level + 1));
    Iterator<SSTableReader> levelIterator = generations.wrappingIterator(level, lastCompactedSSTables[level]);
    while (levelIterator.hasNext()) {
        SSTableReader sstable = levelIterator.next();
        // The candidate set is the sstable plus everything it overlaps in the next level.
        Set<SSTableReader> candidates = Sets.union(Collections.singleton(sstable), overlappingWithBounds(sstable, sstablesNextLevel));
        if (Iterables.any(candidates, SSTableReader::isMarkedSuspect))
            continue;
        if (Sets.intersection(candidates, compacting).isEmpty())
            return candidates;
    }
    return Collections.emptyList();
}
351379.121867cassandra
/**
 * Validates the time-window compaction strategy options, removing each recognized
 * key from {@code uncheckedOptions} and delegating the remainder to
 * {@link SizeTieredCompactionStrategyOptions#validateOptions}.
 *
 * @param options          all options supplied by the user
 * @param uncheckedOptions options not yet validated; recognized keys are removed
 * @return the remaining unchecked options after delegation
 * @throws ConfigurationException if any recognized option has an invalid value
 */
public static Map<String, String> validateOptions(Map<String, String> options, Map<String, String> uncheckedOptions) throws ConfigurationException {
    String optionValue = options.get(TIMESTAMP_RESOLUTION_KEY);
    try {
        if (optionValue != null)
            if (!validTimestampTimeUnits.contains(TimeUnit.valueOf(optionValue)))
                throw new ConfigurationException(String.format("%s is not valid for %s", optionValue, TIMESTAMP_RESOLUTION_KEY));
    } catch (IllegalArgumentException e) {
        // Pass the cause along (previously dropped, unlike the window-unit handler below).
        throw new ConfigurationException(String.format("%s is not valid for %s", optionValue, TIMESTAMP_RESOLUTION_KEY), e);
    }
    optionValue = options.get(COMPACTION_WINDOW_UNIT_KEY);
    try {
        if (optionValue != null)
            if (!validWindowTimeUnits.contains(TimeUnit.valueOf(optionValue)))
                throw new ConfigurationException(String.format("%s is not valid for %s", optionValue, COMPACTION_WINDOW_UNIT_KEY));
    } catch (IllegalArgumentException e) {
        throw new ConfigurationException(String.format("%s is not valid for %s", optionValue, COMPACTION_WINDOW_UNIT_KEY), e);
    }
    optionValue = options.get(COMPACTION_WINDOW_SIZE_KEY);
    try {
        int sstableWindowSize = optionValue == null ? DEFAULT_COMPACTION_WINDOW_SIZE : Integer.parseInt(optionValue);
        if (sstableWindowSize < 1) {
            // Message fixed: the check accepts 1, so the requirement is "at least 1",
            // not "greater than 1" as the old message claimed.
            throw new ConfigurationException(String.format("%d must be at least 1 for %s", sstableWindowSize, COMPACTION_WINDOW_SIZE_KEY));
        }
    } catch (NumberFormatException e) {
        throw new ConfigurationException(String.format("%s is not a parsable int (base10) for %s", optionValue, COMPACTION_WINDOW_SIZE_KEY), e);
    }
    optionValue = options.get(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY);
    try {
        long expiredCheckFrequency = optionValue == null ? DEFAULT_EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS : Long.parseLong(optionValue);
        if (expiredCheckFrequency < 0) {
            throw new ConfigurationException(String.format("%s must not be negative, but was %d", EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY, expiredCheckFrequency));
        }
    } catch (NumberFormatException e) {
        // Message fixed: this value is parsed with Long.parseLong, not Integer.parseInt.
        throw new ConfigurationException(String.format("%s is not a parsable long (base10) for %s", optionValue, EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY), e);
    }
    optionValue = options.get(UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_KEY);
    if (optionValue != null) {
        if (!(optionValue.equalsIgnoreCase("true") || optionValue.equalsIgnoreCase("false")))
            throw new ConfigurationException(String.format("%s is not 'true' or 'false' (%s)", UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_KEY, optionValue));
        // The unsafe mode must additionally be enabled by a system property at startup.
        if (optionValue.equalsIgnoreCase("true") && !UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_ENABLED)
            throw new ConfigurationException(String.format("%s is requested but not allowed, restart cassandra with -D%s=true to allow it", UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_KEY, ALLOW_UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION.getKey()));
    }
    // Mark all recognized keys as checked before delegating to the size-tiered validator.
    uncheckedOptions.remove(COMPACTION_WINDOW_SIZE_KEY);
    uncheckedOptions.remove(COMPACTION_WINDOW_UNIT_KEY);
    uncheckedOptions.remove(TIMESTAMP_RESOLUTION_KEY);
    uncheckedOptions.remove(EXPIRED_SSTABLE_CHECK_FREQUENCY_SECONDS_KEY);
    uncheckedOptions.remove(UNSAFE_AGGRESSIVE_SSTABLE_EXPIRATION_KEY);
    uncheckedOptions = SizeTieredCompactionStrategyOptions.validateOptions(options, uncheckedOptions);
    return uncheckedOptions;
}
361407.711960cassandra
/**
 * Instantiates and installs the configured auth components (authenticator,
 * authorizer, role manager, internode authenticator, network authorizer, CIDR
 * authorizer) into {@link DatabaseDescriptor}, performing cross-component
 * compatibility checks, then validates each component's configuration.
 * <p>
 * Idempotent: only the first call has any effect. Order matters — each component
 * is set on DatabaseDescriptor before later checks and validation reference it.
 *
 * @throws ConfigurationException if an incompatible combination of components is configured
 */
public static void applyAuth() {
    // One-shot guard: repeated calls are no-ops.
    if (initialized)
        return;
    initialized = true;
    Config conf = DatabaseDescriptor.getRawConfig();
    // Authenticator defaults to allow-all unless configured otherwise.
    IAuthenticator authenticator = new AllowAllAuthenticator();
    if (conf.authenticator != null) {
        authenticator = ParameterizedClass.newInstance(conf.authenticator, Arrays.asList("", AuthConfig.class.getPackage().getName()));
    }
    // Warn when credentials-cache tuning is set for an authenticator that likely ignores it.
    // NOTE(review): 2000 ms / 1000 entries presumably mirror the config defaults — confirm.
    if (!(authenticator instanceof PasswordAuthenticator || authenticator instanceof MutualTlsAuthenticator) && (conf.credentials_update_interval != null || conf.credentials_validity.toMilliseconds() != 2000 || conf.credentials_cache_max_entries != 1000)) {
        logger.info("Configuration options credentials_update_interval, credentials_validity and " + "credentials_cache_max_entries may not be applicable for the configured authenticator ({})", authenticator.getClass().getName());
    }
    DatabaseDescriptor.setAuthenticator(authenticator);
    // Authorizer defaults to allow-all unless configured otherwise.
    IAuthorizer authorizer = new AllowAllAuthorizer();
    if (conf.authorizer != null)
        authorizer = FBUtilities.newAuthorizer(conf.authorizer);
    // Authorization without authentication is meaningless: there is no identity to authorize.
    if (!authenticator.requireAuthentication() && authorizer.requireAuthorization())
        throw new ConfigurationException(conf.authenticator.class_name + " can't be used with " + conf.authorizer, false);
    DatabaseDescriptor.setAuthorizer(authorizer);
    IRoleManager roleManager;
    if (conf.role_manager != null)
        roleManager = FBUtilities.newRoleManager(conf.role_manager);
    else
        roleManager = new CassandraRoleManager();
    // PasswordAuthenticator stores credentials via CassandraRoleManager, so the pairing is mandatory.
    if (authenticator instanceof PasswordAuthenticator && !(roleManager instanceof CassandraRoleManager))
        throw new ConfigurationException("CassandraRoleManager must be used with PasswordAuthenticator", false);
    DatabaseDescriptor.setRoleManager(roleManager);
    if (conf.internode_authenticator != null) {
        DatabaseDescriptor.setInternodeAuthenticator(ParameterizedClass.newInstance(conf.internode_authenticator, Arrays.asList("", AuthConfig.class.getPackage().getName())));
    }
    INetworkAuthorizer networkAuthorizer = FBUtilities.newNetworkAuthorizer(conf.network_authorizer);
    DatabaseDescriptor.setNetworkAuthorizer(networkAuthorizer);
    // Same rule as above: network authorization requires an authenticating authenticator.
    if (networkAuthorizer.requireAuthorization() && !authenticator.requireAuthentication()) {
        throw new ConfigurationException(conf.network_authorizer + " can't be used with " + conf.authenticator.class_name, false);
    }
    ICIDRAuthorizer cidrAuthorizer = ICIDRAuthorizer.newCIDRAuthorizer(conf.cidr_authorizer);
    DatabaseDescriptor.setCIDRAuthorizer(cidrAuthorizer);
    // CIDR authorization likewise requires authentication.
    if (cidrAuthorizer.requireAuthorization() && !authenticator.requireAuthentication()) {
        throw new ConfigurationException(conf.cidr_authorizer + " can't be used with " + conf.authenticator, false);
    }
    // Validate every component only after all of them are installed, since
    // validation may consult other components through DatabaseDescriptor.
    authenticator.validateConfiguration();
    authorizer.validateConfiguration();
    roleManager.validateConfiguration();
    networkAuthorizer.validateConfiguration();
    cidrAuthorizer.validateConfiguration();
    DatabaseDescriptor.getInternodeAuthenticator().validateConfiguration();
}
371033.71266cassandra
/**
 * Builds a {@code CommitLogArchiver} from the {@code commitlog_archiving.properties}
 * resource on the classpath, or returns a disabled archiver when the resource is
 * absent. Ensures every configured restore directory exists, and parses the
 * restore point-in-time and snapshot commit log position settings.
 *
 * @return the configured archiver, or a disabled one when no properties file exists
 * @throws RuntimeException if the properties cannot be loaded, a restore directory
 *         cannot be created, or a time/position value cannot be parsed
 */
public static CommitLogArchiver construct() {
    Properties props = new Properties();
    try (InputStream stream = CommitLogArchiver.class.getClassLoader().getResourceAsStream("commitlog_archiving.properties")) {
        // No properties resource means archiving and point-in-time restore are off.
        if (stream == null) {
            logger.trace("No commitlog_archiving properties found; archive + pitr will be disabled");
            return disabled();
        }
        props.load(stream);
        String archiveCommand = props.getProperty("archive_command");
        String restoreCommand = props.getProperty("restore_command");
        String restoreDirectories = props.getProperty("restore_directories");
        if (restoreDirectories != null && !restoreDirectories.isEmpty()) {
            // Create any configured restore directory that does not exist yet.
            for (String dir : restoreDirectories.split(DELIMITER)) {
                File directory = new File(dir);
                if (!directory.exists() && !directory.tryCreateDirectory()) {
                    throw new RuntimeException("Unable to create directory: " + dir);
                }
            }
        }
        String targetTime = props.getProperty("restore_point_in_time");
        TimeUnit precision = TimeUnit.valueOf(props.getProperty("precision", "MICROSECONDS"));
        // An unset target time means "restore everything" (no cutoff).
        long restorePointInTime;
        try {
            restorePointInTime = Strings.isNullOrEmpty(targetTime) ? Long.MAX_VALUE : format.parse(targetTime).getTime();
        } catch (ParseException e) {
            throw new RuntimeException("Unable to parse restore target time", e);
        }
        String snapshotPosition = props.getProperty("snapshot_commitlog_position");
        CommitLogPosition snapshotCommitLogPosition;
        try {
            snapshotCommitLogPosition = Strings.isNullOrEmpty(snapshotPosition) ? CommitLogPosition.NONE : CommitLogPosition.serializer.fromString(snapshotPosition);
        } catch (ParseException | NumberFormatException e) {
            throw new RuntimeException("Unable to parse snapshot commit log position", e);
        }
        return new CommitLogArchiver(archiveCommand, restoreCommand, restoreDirectories, restorePointInTime, snapshotCommitLogPosition, precision);
    } catch (IOException e) {
        throw new RuntimeException("Unable to load commitlog_archiving.properties", e);
    }
}
381053.131161cassandra
/**
 * Translates this restriction into expressions on the given row filter, according
 * to the kind of column expression it targets (single column, multi-column tuple,
 * or map element).
 *
 * @param filter        the row filter to append expressions to
 * @param indexRegistry used to verify index support for LIKE expressions
 * @param options       query options used to bind marker values
 * @throws UnsupportedOperationException for token restrictions or unknown expression kinds
 */
public void addToRowFilter(RowFilter filter, IndexRegistry indexRegistry, QueryOptions options) {
    // Token restrictions cannot be expressed as row-filter expressions.
    if (isOnToken())
        throw new UnsupportedOperationException();
    switch (columnsExpression.kind()) {
        case SINGLE_COLUMN: {
            List<ByteBuffer> serialized = bindAndGet(options);
            ColumnMetadata column = firstColumn();
            switch (operator) {
                case IN:
                    filter.add(column, operator, inValues(column, serialized));
                    break;
                case LIKE:
                    LikePattern pattern = LikePattern.parse(serialized.get(0));
                    RowFilter.SimpleExpression expression = filter.add(column, pattern.kind().operator(), pattern.value());
                    // LIKE is only valid when a supporting index exists.
                    indexRegistry.getBestIndexFor(expression).orElseThrow(() -> invalidRequest("%s is only supported on properly indexed columns", expression));
                    break;
                default:
                    filter.add(column, operator, serialized.get(0));
                    break;
            }
            break;
        }
        case MULTI_COLUMN: {
            checkFalse(isSlice(), "Multi-column slice restrictions cannot be used for filtering.");
            if (isEQ()) {
                // (a, b) = (x, y) decomposes into one equality expression per column.
                List<ByteBuffer> elements = bindAndGetElements(options).get(0);
                for (int i = 0, m = columns().size(); i < m; i++) {
                    filter.add(columns().get(i), Operator.EQ, elements.get(i));
                }
            } else if (isIN()) {
                if (columns().size() != 1)
                    throw invalidRequest("Multicolumn IN filters are not supported");
                // A one-column tuple IN collapses to a plain single-column IN.
                List<ByteBuffer> values = bindAndGetElements(options).stream().map(elements -> elements.get(0)).collect(Collectors.toList());
                filter.add(firstColumn(), Operator.IN, inValues(firstColumn(), values));
            }
            break;
        }
        case MAP_ELEMENT: {
            ByteBuffer key = columnsExpression.mapKey(options);
            List<ByteBuffer> mapValues = bindAndGet(options);
            filter.addMapEquality(firstColumn(), key, operator, mapValues.get(0));
            break;
        }
        default:
            throw new UnsupportedOperationException();
    }
}
39818.841657cassandra
/**
 * Selects the function overload best matching the provided arguments and receiver.
 * An exact argument match wins immediately; otherwise the weakly-assignable
 * candidates are collected and disambiguated, with operation-specific handling
 * for ambiguous arithmetic operators and negation.
 *
 * @return the single best-matching candidate
 * @throws InvalidRequestException when no candidate matches or the call is ambiguous
 */
private static Function pickBestMatch(String keyspace, FunctionName name, List<? extends AssignmentTestable> providedArgs, String receiverKeyspace, String receiverTable, AbstractType<?> receiverType, Collection<Function> candidates) {
    // Lazily-created list of weakly-assignable candidates.
    List<Function> weakMatches = null;
    for (Function candidate : candidates) {
        if (!matchReturnType(candidate, receiverType))
            continue;
        AssignmentTestable.TestResult result = matchAguments(keyspace, candidate, providedArgs, receiverKeyspace, receiverTable);
        if (result == AssignmentTestable.TestResult.EXACT_MATCH)
            return candidate;
        if (result == AssignmentTestable.TestResult.WEAKLY_ASSIGNABLE) {
            if (weakMatches == null)
                weakMatches = new ArrayList<>();
            weakMatches.add(candidate);
        }
    }
    // Nothing matched at all: report with an operation-specific message when applicable.
    if (weakMatches == null) {
        if (OperationFcts.isOperation(name))
            throw invalidRequest("the '%s' operation is not supported between %s and %s", OperationFcts.getOperator(name), providedArgs.get(0), providedArgs.get(1));
        throw invalidRequest("Invalid call to function %s, none of its type signatures match (known type signatures: %s)", name, format(candidates));
    }
    // A unique weak match is unambiguous.
    if (weakMatches.size() == 1)
        return weakMatches.get(0);
    // Several weak matches: try to disambiguate, otherwise fail as ambiguous.
    if (OperationFcts.isOperation(name)) {
        // For operations, prefer the overload whose both argument types equal the receiver type
        // (only meaningful when the receiver type is known and no bind markers are involved).
        if (receiverType != null && !containsMarkers(providedArgs)) {
            for (Function candidate : weakMatches) {
                List<AbstractType<?>> argTypes = candidate.argTypes();
                if (receiverType.equals(argTypes.get(0)) && receiverType.equals(argTypes.get(1)))
                    return candidate;
            }
        }
        throw invalidRequest("Ambiguous '%s' operation with args %s and %s: use type hint to disambiguate, example '(int) ?'", OperationFcts.getOperator(name), providedArgs.get(0), providedArgs.get(1));
    }
    if (OperationFcts.isNegation(name))
        throw invalidRequest("Ambiguous negation: use type casts to disambiguate");
    throw invalidRequest("Ambiguous call to function %s (can be matched by following signatures: %s): use type casts to disambiguate", name, format(weakMatches));
}
40837.741552cassandra
public void validate() {
    // Type-check every supplied role option against what the configured role
    // manager supports; throws InvalidRequestException on the first violation.
    for (Map.Entry<IRoleManager.Option, Object> entry : options.entrySet()) {
        IRoleManager.Option key = entry.getKey();
        Object value = entry.getValue();
        if (!DatabaseDescriptor.getRoleManager().supportedOptions().contains(key))
            throw new InvalidRequestException(String.format("%s doesn't support %s", DatabaseDescriptor.getRoleManager().getClass().getName(), key));
        switch (key) {
            case LOGIN:
            case SUPERUSER:
                // Both flags must be plain booleans.
                if (!(value instanceof Boolean))
                    throw new InvalidRequestException(String.format("Invalid value for property '%s'. It must be a boolean", key));
                break;
            case PASSWORD:
                if (!(value instanceof String))
                    throw new InvalidRequestException(String.format("Invalid value for property '%s'. It must be a string", key));
                // Plain and pre-hashed passwords may not be supplied together.
                if (options.containsKey(IRoleManager.Option.HASHED_PASSWORD))
                    throw new InvalidRequestException(String.format("Properties '%s' and '%s' are mutually exclusive", IRoleManager.Option.PASSWORD, IRoleManager.Option.HASHED_PASSWORD));
                break;
            case HASHED_PASSWORD:
                if (!(value instanceof String))
                    throw new InvalidRequestException(String.format("Invalid value for property '%s'. It must be a string", key));
                if (options.containsKey(IRoleManager.Option.PASSWORD))
                    throw new InvalidRequestException(String.format("Properties '%s' and '%s' are mutually exclusive", IRoleManager.Option.PASSWORD, IRoleManager.Option.HASHED_PASSWORD));
                // Verify the value parses as a jBcrypt hash; checkpw throws on a malformed salt/format.
                try {
                    BCrypt.checkpw("dummy", (String) value);
                } catch (Exception e) {
                    throw new InvalidRequestException("Invalid hashed password value. Please use jBcrypt.");
                }
                break;
            case OPTIONS:
                if (!(value instanceof Map))
                    throw new InvalidRequestException(String.format("Invalid value for property '%s'. It must be a map", key));
                break;
        }
    }
}
411178.661738cassandra
/**
 * Computes the partition-position bounds selected by these restrictions.
 * Returns {@code null} when the restrictions are contradictory (select nothing).
 */
public AbstractBounds<PartitionPosition> bounds(IPartitioner partitioner, QueryOptions options) {
    if (isOnToken()) {
        // Token restrictions: fold them into a single Guava range of tokens.
        RangeSet<Token> tokenRangeSet = toRangeSet(partitioner, tokenRestrictions, options);
        Set<Range<Token>> ranges = tokenRangeSet.asRanges();
        // An empty range set means the restrictions exclude every token.
        if (ranges.isEmpty())
            return null;
        assert ranges.size() == 1;
        Range<Token> range = ranges.iterator().next();
        // Unbounded sides fall back to the partitioner's minimum token.
        Token startToken = range.hasLowerBound() ? range.lowerEndpoint() : partitioner.getMinimumToken();
        Token endToken = range.hasUpperBound() ? range.upperEndpoint() : partitioner.getMinimumToken();
        boolean includeStart = range.hasLowerBound() && range.lowerBoundType() == BoundType.CLOSED;
        boolean includeEnd = range.hasUpperBound() && range.upperBoundType() == BoundType.CLOSED;
        int cmp = startToken.compareTo(endToken);
        // A non-wrapping range whose start is past its end (or a degenerate
        // single-token range with an open side) selects nothing.
        if (!startToken.isMinimum() && !endToken.isMinimum() && (cmp > 0 || (cmp == 0 && (!includeStart || !includeEnd))))
            return null;
        // Convert token bounds into key bounds, honoring open/closed endpoints.
        PartitionPosition start = includeStart ? startToken.minKeyBound() : startToken.maxKeyBound();
        PartitionPosition end = includeEnd ? endToken.maxKeyBound() : endToken.minKeyBound();
        return new org.apache.cassandra.dht.Range<>(start, end);
    }
    // No restrictions: the minimal bounds covering the whole ring start.
    if (restrictions.isEmpty())
        return new Bounds<>(partitioner.getMinimumToken().minKeyBound(), partitioner.getMinimumToken().minKeyBound());
    // Restrictions that require post-filtering scan the full ring.
    if (needFiltering())
        return new org.apache.cassandra.dht.Range<>(partitioner.getMinimumToken().minKeyBound(), partitioner.getMinimumToken().maxKeyBound());
    // Otherwise the restrictions pin a single partition key.
    ByteBuffer partitionKey = nonTokenRestrictionValues(options, null).get(0);
    PartitionPosition position = PartitionPosition.ForKey.get(partitionKey, partitioner);
    return new Bounds<>(position, position);
}
42784.971940cassandra
/**
 * Applies every table option present in these statement options to the given
 * builder, leaving unspecified options at the builder's defaults, and builds
 * the resulting {@link TableParams}.
 */
private TableParams build(TableParams.Builder builder) {
    // NOTE(review): ALLOW_AUTO_SNAPSHOT and INCREMENTAL_BACKUPS pass an explicit
    // 'true' default to getBoolean, unlike CDC — presumably intentional; confirm.
    if (hasOption(ALLOW_AUTO_SNAPSHOT))
        builder.allowAutoSnapshot(getBoolean(ALLOW_AUTO_SNAPSHOT.toString(), true));
    if (hasOption(BLOOM_FILTER_FP_CHANCE))
        builder.bloomFilterFpChance(getDouble(BLOOM_FILTER_FP_CHANCE));
    if (hasOption(CACHING))
        builder.caching(CachingParams.fromMap(getMap(CACHING)));
    if (hasOption(COMMENT))
        builder.comment(getString(COMMENT));
    if (hasOption(COMPACTION))
        builder.compaction(CompactionParams.fromMap(getMap(COMPACTION)));
    if (hasOption(COMPRESSION))
        builder.compression(CompressionParams.fromMap(getMap(COMPRESSION)));
    if (hasOption(Option.MEMTABLE))
        builder.memtable(MemtableParams.getWithFallback(getString(Option.MEMTABLE)));
    if (hasOption(DEFAULT_TIME_TO_LIVE))
        builder.defaultTimeToLive(getInt(DEFAULT_TIME_TO_LIVE));
    if (hasOption(GC_GRACE_SECONDS))
        builder.gcGraceSeconds(getInt(GC_GRACE_SECONDS));
    if (hasOption(INCREMENTAL_BACKUPS))
        builder.incrementalBackups(getBoolean(INCREMENTAL_BACKUPS.toString(), true));
    if (hasOption(MAX_INDEX_INTERVAL))
        builder.maxIndexInterval(getInt(MAX_INDEX_INTERVAL));
    if (hasOption(MEMTABLE_FLUSH_PERIOD_IN_MS))
        builder.memtableFlushPeriodInMs(getInt(MEMTABLE_FLUSH_PERIOD_IN_MS));
    if (hasOption(MIN_INDEX_INTERVAL))
        builder.minIndexInterval(getInt(MIN_INDEX_INTERVAL));
    if (hasOption(SPECULATIVE_RETRY))
        builder.speculativeRetry(SpeculativeRetryPolicy.fromString(getString(SPECULATIVE_RETRY)));
    if (hasOption(ADDITIONAL_WRITE_POLICY))
        builder.additionalWritePolicy(SpeculativeRetryPolicy.fromString(getString(ADDITIONAL_WRITE_POLICY)));
    if (hasOption(CRC_CHECK_CHANCE))
        builder.crcCheckChance(getDouble(CRC_CHECK_CHANCE));
    if (hasOption(CDC))
        builder.cdc(getBoolean(CDC));
    if (hasOption(READ_REPAIR))
        builder.readRepair(ReadRepairStrategy.fromString(getString(READ_REPAIR)));
    return builder.build();
}
43635.55843cassandra
 Throwable updateSizeTracking(Iterable<SSTableReader> oldSSTables, Iterable<SSTableReader> newSSTables, Throwable accumulate) {
    // Dummy transactions perform no size bookkeeping.
    if (isDummy())
        return accumulate;
    // Sum on-disk sizes of the sstables being added; individual failures are
    // accumulated rather than thrown so tracking stays best-effort.
    long bytesAdded = 0;
    long uncompressedBytesAdded = 0;
    for (SSTableReader reader : newSSTables) {
        if (logger.isTraceEnabled())
            logger.trace("adding {} to list of files tracked for {}.{}", reader.descriptor, cfstore.getKeyspaceName(), cfstore.name);
        try {
            bytesAdded += reader.bytesOnDisk();
            uncompressedBytesAdded += reader.logicalBytesOnDisk();
        } catch (Throwable t) {
            accumulate = merge(accumulate, t);
        }
    }
    // Same for the sstables being removed.
    long bytesRemoved = 0;
    long uncompressedBytesRemoved = 0;
    for (SSTableReader reader : oldSSTables) {
        if (logger.isTraceEnabled())
            logger.trace("removing {} from list of files tracked for {}.{}", reader.descriptor, cfstore.getKeyspaceName(), cfstore.name);
        try {
            bytesRemoved += reader.bytesOnDisk();
            uncompressedBytesRemoved += reader.logicalBytesOnDisk();
        } catch (Throwable t) {
            accumulate = merge(accumulate, t);
        }
    }
    // Apply the net delta to global and per-table metrics; totalDiskSpaceUsed
    // counts only additions, matching its ever-growing semantics here.
    long delta = bytesAdded - bytesRemoved;
    long uncompressedDelta = uncompressedBytesAdded - uncompressedBytesRemoved;
    StorageMetrics.load.inc(delta);
    StorageMetrics.uncompressedLoad.inc(uncompressedDelta);
    cfstore.metric.liveDiskSpaceUsed.inc(delta);
    cfstore.metric.uncompressedLiveDiskSpaceUsed.inc(uncompressedDelta);
    cfstore.metric.totalDiskSpaceUsed.inc(bytesAdded);
    return accumulate;
}
44679.081138cassandra
/**
 * Parses a parenthesized list of {@code alias=>type} pairs from the shared
 * parse state (the {@code str}/{@code idx} fields), e.g. "(a=>Int32Type,b=>UTF8Type)".
 *
 * @return mapping from single-character alias (as a byte) to parsed type;
 *         empty when the parser is already at end of string
 * @throws SyntaxException on malformed input or an unterminated list
 */
public Map<Byte, AbstractType<?>> getAliasParameters() throws SyntaxException, ConfigurationException {
    Map<Byte, AbstractType<?>> map = new HashMap<>();
    if (isEOS())
        return map;
    // The list must open with '('; anything else is a caller programming error.
    if (str.charAt(idx) != '(')
        throw new IllegalStateException();
    ++idx;
    while (skipBlankAndComma()) {
        // ')' closes the list; consume it and return what we have.
        if (str.charAt(idx) == ')') {
            ++idx;
            return map;
        }
        String alias = readNextIdentifier();
        if (alias.length() != 1)
            throwSyntaxError("An alias should be a single character: '" + alias + "', string: " + str);
        char aliasChar = alias.charAt(0);
        // Aliases are restricted to printable ASCII (codes 33..127).
        if (aliasChar < 33 || aliasChar > 127)
            throwSyntaxError("An alias should be a single character in [0..9a..bA..B-+._&]");
        skipBlank();
        // Expect the two-character '=>' separator between alias and type.
        if (!(str.charAt(idx) == '=' && str.charAt(idx + 1) == '>'))
            throwSyntaxError("expecting '=>' token");
        idx += 2;
        skipBlank();
        try {
            // Recursively parse the aliased type; wrap failures with positional context.
            map.put((byte) aliasChar, parse());
        } catch (SyntaxException e) {
            SyntaxException ex = new SyntaxException(String.format("Exception while parsing '%s' around char %d", str, idx));
            ex.initCause(e);
            throw ex;
        }
    }
    // Ran off the end of the string without seeing the closing ')'.
    throw new SyntaxException(String.format("Syntax error parsing '%s' at char %d: unexpected end of string", str, idx));
}
45442.2840cassandra
private static IncludeExcludeHolder loadInputSets(String includedInput, String excludedInput) {
    // Parse the exclude list first: excluded entries are also dropped from the
    // include set below. Empty CSV fragments (",,") are skipped.
    ImmutableSet<String> excludedSet;
    if (StringUtils.isEmpty(excludedInput)) {
        excludedSet = EMPTY_FILTERS;
    } else {
        String[] tokens = excludedInput.split(",");
        ImmutableSet.Builder<String> excluded = ImmutableSet.builderWithExpectedSize(tokens.length);
        for (String token : tokens) {
            if (token.isEmpty())
                continue;
            excluded.add(token);
        }
        excludedSet = excluded.build();
    }
    ImmutableSet<String> includedSet;
    if (StringUtils.isEmpty(includedInput)) {
        includedSet = EMPTY_FILTERS;
    } else {
        String[] tokens = includedInput.split(",");
        ImmutableSet.Builder<String> included = ImmutableSet.builderWithExpectedSize(tokens.length);
        for (String token : tokens) {
            // Exclusion always wins over inclusion.
            if (token.isEmpty() || excludedSet.contains(token))
                continue;
            included.add(token);
        }
        includedSet = included.build();
    }
    return new IncludeExcludeHolder(includedSet, excludedSet);
}
4620.681485cassandra
/**
 * Creates a selector {@link Factory} for a user-type literal whose fields are
 * produced by the given per-field factories. Aggregate/writetime/TTL/fetched-column
 * properties are delegated to the field factories.
 */
public static Factory newFactory(final AbstractType<?> type, final Map<FieldIdentifier, Factory> factories) {
    return new Factory() {

        // Column name is the user-type literal rendered from the field factories' names.
        protected String getColumnName() {
            return UserTypes.userTypeToString(factories, Factory::getColumnName);
        }

        protected AbstractType<?> getReturnType() {
            return type;
        }

        // Collect the mappings of every field factory into a temporary mapping;
        // if none of them mapped anything for this result column, record an
        // explicit null mapping so the column still appears.
        protected final void addColumnMapping(SelectionColumnMapping mapping, ColumnSpecification resultsColumn) {
            SelectionColumnMapping tmpMapping = SelectionColumnMapping.newMapping();
            for (Factory factory : factories.values()) {
                factory.addColumnMapping(tmpMapping, resultsColumn);
            }
            if (tmpMapping.getMappings().get(resultsColumn).isEmpty())
                mapping.addMapping(resultsColumn, (ColumnMetadata) null);
            else
                mapping.addMapping(resultsColumn, tmpMapping.getMappings().values());
        }

        // Instantiate one selector per field and wrap them in a UserTypeSelector.
        public Selector newInstance(final QueryOptions options) {
            Map<FieldIdentifier, Selector> fields = new HashMap<>(factories.size());
            for (Entry<FieldIdentifier, Factory> factory : factories.entrySet()) fields.put(factory.getKey(), factory.getValue().newInstance(options));
            return new UserTypeSelector(type, fields);
        }

        // The literal is an aggregate if any of its fields is.
        @Override
        public boolean isAggregateSelectorFactory() {
            for (Factory factory : factories.values()) {
                if (factory.isAggregateSelectorFactory())
                    return true;
            }
            return false;
        }

        @Override
        public void addFunctionsTo(List<Function> functions) {
            for (Factory factory : factories.values()) factory.addFunctionsTo(functions);
        }

        // Writetime/TTL status likewise propagates from any field.
        @Override
        public boolean isWritetimeSelectorFactory() {
            for (Factory factory : factories.values()) {
                if (factory.isWritetimeSelectorFactory())
                    return true;
            }
            return false;
        }

        @Override
        public boolean isTTLSelectorFactory() {
            for (Factory factory : factories.values()) {
                if (factory.isTTLSelectorFactory())
                    return true;
            }
            return false;
        }

        // All fetched columns are known only if every field factory's are.
        @Override
        boolean areAllFetchedColumnsKnown() {
            for (Factory factory : factories.values()) {
                if (!factory.areAllFetchedColumnsKnown())
                    return false;
            }
            return true;
        }

        @Override
        void addFetchedColumns(Builder builder) {
            for (Factory factory : factories.values()) factory.addFetchedColumns(builder);
        }
    };
}
47679.4229cassandra
private static List<AuditLogEntry> buildEntriesForBatch(List<? extends CQLStatement> statements, List<String> queries, QueryState state, QueryOptions options, long queryStartTimeMillis) {
    // One synthetic entry describing the batch as a whole, followed by one
    // entry per contained statement, all tagged with the same batch id.
    UUID batchId = UUID.randomUUID();
    List<AuditLogEntry> entries = new ArrayList<>(statements.size() + 1);
    String batchOperation = String.format("BatchId:[%s] - BATCH of [%d] statements", batchId, statements.size());
    entries.add(new AuditLogEntry.Builder(state).setOperation(batchOperation).setOptions(options).setTimestamp(queryStartTimeMillis).setBatch(batchId).setType(AuditLogEntryType.BATCH).build());
    for (int i = 0; i < statements.size(); i++) {
        CQLStatement statement = statements.get(i);
        entries.add(new AuditLogEntry.Builder(state).setType(statement.getAuditLogContext().auditLogEntryType).setOperation(queries.get(i)).setTimestamp(queryStartTimeMillis).setScope(statement).setKeyspace(state, statement).setOptions(options).setBatch(batchId).build());
    }
    return entries;
}
48420.43332cassandra
/**
 * Best-effort removal of every permission entry belonging to a dropped role.
 * Failures are logged and swallowed so role removal itself is not blocked.
 */
public void revokeAllFrom(RoleResource revokee) {
    try {
        // Find every resource the role holds permissions on...
        UntypedResultSet rows = process(String.format("SELECT resource FROM %s.%s WHERE role = '%s'", SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_PERMISSIONS, escape(revokee.getRoleName())), authReadConsistencyLevel());
        List<CQLStatement> statements = new ArrayList<>();
        for (UntypedResultSet.Row row : rows) {
            // ...and delete the matching inverse-index entries...
            statements.add(QueryProcessor.getStatement(String.format("DELETE FROM %s.%s WHERE resource = '%s' AND role = '%s'", SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.RESOURCE_ROLE_INDEX, escape(row.getString("resource")), escape(revokee.getRoleName())), ClientState.forInternalCalls()));
        }
        // ...then drop the role's own permissions row; all applied as one logged batch.
        statements.add(QueryProcessor.getStatement(String.format("DELETE FROM %s.%s WHERE role = '%s'", SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_PERMISSIONS, escape(revokee.getRoleName())), ClientState.forInternalCalls()));
        executeLoggedBatch(statements);
    } catch (RequestExecutionException | RequestValidationException e) {
        // Intentional swallow: cleanup is best-effort.
        logger.warn(String.format("CassandraAuthorizer failed to revoke all permissions of %s", revokee.getRoleName()), e);
    }
}
49385.26629cassandra
protected boolean authenticateInternodeWithMtls(InetAddress remoteAddress, int remotePort, Certificate[] certificates, IInternodeAuthenticator.InternodeConnectionDirection connectionType) {
    // Only inbound connections present a peer certificate chain to verify;
    // any other direction is accepted as-is.
    if (connectionType != IInternodeAuthenticator.InternodeConnectionDirection.INBOUND)
        return true;
    String identity = certificateValidator.identity(certificates);
    if (!certificateValidator.isValidCertificate(certificates)) {
        noSpamLogger.error("Not a valid certificate from {}:{} with identity '{}'", remoteAddress, remotePort, identity);
        return false;
    }
    // The identity extracted from the certificate must be on the trusted list.
    if (!trustedIdentities.contains(identity)) {
        noSpamLogger.error("Unable to authenticate user {}", identity);
        return false;
    }
    // Warn when the certificate is close to expiry, then record its remaining lifetime.
    int minutesToExpiration = certificateValidityPeriodValidator.validate(certificates);
    if (certificateValidityWarnThreshold != null && minutesToExpiration < certificateValidityWarnThreshold.toMinutes())
        noSpamLogger.warn("Certificate from {}:{} with identity '{}' will expire in {}", remoteAddress, remotePort, identity, MutualTlsUtil.toHumanReadableCertificateExpiration(minutesToExpiration));
    MutualTlsMetrics.instance.internodeCertificateExpirationDays.update(MutualTlsUtil.minutesToDays(minutesToExpiration));
    return true;
}
50549.191021cassandra
public Set<PermissionDetails> list(AuthenticatedUser performer, Set<Permission> permissions, IResource resource, RoleResource grantee) throws RequestValidationException, RequestExecutionException {
    // A performer may list permissions if they are super/system, a member of the
    // grantee role, or hold DESCRIBE on the root role resource or the grantee.
    // (Same short-circuit order as the original negated condition.)
    boolean authorized = performer.isSuper()
                      || performer.isSystem()
                      || performer.getRoles().contains(grantee)
                      || performer.getPermissions(RoleResource.root()).contains(Permission.DESCRIBE)
                      || (grantee != null && performer.getPermissions(grantee).contains(Permission.DESCRIBE));
    if (!authorized)
        throw new UnauthorizedException(String.format("You are not authorized to view %s's permissions", grantee == null ? "everyone" : grantee.getRoleName()));
    // No grantee means "list for everyone".
    if (grantee == null)
        return listPermissionsForRole(permissions, resource, null);
    // Include permissions inherited through the role hierarchy.
    Set<RoleResource> roles = DatabaseDescriptor.getRoleManager().getRoles(grantee, true);
    Set<PermissionDetails> details = new HashSet<>();
    for (RoleResource role : roles)
        details.addAll(listPermissionsForRole(permissions, resource, role));
    return details;
}
51455.0721cassandra
public void executeFailure(CQLStatement statement, String query, QueryOptions options, QueryState state, Exception cause) {
    // Build an audit entry for the failed execution; when the prepared statement
    // was not found we have no statement context, so log only the raw query.
    AuditLogEntry entry;
    if (cause instanceof PreparedQueryNotFoundException) {
        entry = new AuditLogEntry.Builder(state).setOperation(query == null ? "null" : query).setOptions(options).build();
    } else if (statement == null) {
        // Nothing to log without either a known statement or the not-found case.
        entry = null;
    } else {
        entry = new AuditLogEntry.Builder(state).setOperation(query == null ? statement.toString() : query).setType(statement.getAuditLogContext().auditLogEntryType).setScope(statement).setKeyspace(state, statement).setOptions(options).build();
    }
    if (entry != null)
        log(entry, cause, query == null ? null : ImmutableList.of(query));
}
52320.54422cassandra
private Set<Permission> getExistingPermissions(String roleName, String resourceName, Set<Permission> expectedPermissions) {
    // Fetch the stored permission names for this (role, resource) pair and keep
    // only those the caller asked about.
    UntypedResultSet rs = process(String.format("SELECT permissions FROM %s.%s WHERE role = '%s' AND resource = '%s'", SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_PERMISSIONS, roleName, resourceName), ConsistencyLevel.LOCAL_ONE);
    if (rs.isEmpty())
        return Collections.emptySet();
    Row row = rs.one();
    Set<Permission> matched = Sets.newHashSetWithExpectedSize(expectedPermissions.size());
    for (String storedName : row.getSet("permissions", UTF8Type.instance)) {
        Permission stored = Permission.valueOf(storedName);
        if (expectedPermissions.contains(stored))
            matched.add(stored);
    }
    return matched;
}
53333.41420cassandra
public int validate(Certificate[] certificates) throws AuthenticationException {
    // Returns minutes until the leaf certificate expires, or -1 when no X509
    // chain is present; rejects certificates whose total validity period is too long.
    X509Certificate[] chain = MutualTlsUtil.castCertsToX509(certificates);
    if (chain == null || chain.length == 0)
        return -1;
    X509Certificate leaf = chain[0];
    int minutesToExpiration = (int) ChronoUnit.MINUTES.between(FBUtilities.now(), leaf.getNotAfter().toInstant());
    int validityPeriodMinutes = certificateValidityPeriodInMinutes(leaf);
    if (validityPeriodMinutes > maxCertificateValidityPeriodMinutes)
        throw new AuthenticationException(String.format("The validity period of the provided certificate (%s) exceeds the maximum allowed validity period of %s", MutualTlsUtil.toHumanReadableCertificateExpiration(validityPeriodMinutes), MutualTlsUtil.toHumanReadableCertificateExpiration(maxCertificateValidityPeriodMinutes)));
    return minutesToExpiration;
}
54309.07319cassandra
/**
 * Grants {@code role} to {@code grantee}, rejecting redundant grants and grants
 * that would create a cycle in the role hierarchy (check order matters for the
 * error reported).
 */
public void grantRole(AuthenticatedUser performer, RoleResource role, RoleResource grantee) throws RequestValidationException, RequestExecutionException {
    // Redundant: grantee already inherits the role (directly or transitively).
    if (getRoles(grantee, true).contains(role))
        throw new InvalidRequestException(String.format("%s is a member of %s", grantee.getRoleName(), role.getRoleName()));
    // Cycle: the role already inherits the grantee.
    if (getRoles(role, true).contains(grantee))
        throw new InvalidRequestException(String.format("%s is a member of %s", role.getRoleName(), grantee.getRoleName()));
    // Update the grantee's member-of record, then the role's members index.
    modifyRoleMembership(grantee.getRoleName(), role.getRoleName(), "+");
    process(String.format("INSERT INTO %s.%s (role, member) values ('%s', '%s')", SchemaConstants.AUTH_KEYSPACE_NAME, AuthKeyspace.ROLE_MEMBERS, escape(role.getRoleName()), escape(grantee.getRoleName())), consistencyForRoleWrite(role.getRoleName()));
}
55337.53416cassandra
/**
 * Adds to {@code permissions} every permission recorded for {@code role} on
 * {@code resource} in the role-permissions table. No-op when no row exists or
 * the row has no permissions column.
 */
private void addPermissionsForRole(Set<Permission> permissions, IResource resource, RoleResource role) throws RequestExecutionException, RequestValidationException {
    QueryOptions options = QueryOptions.forInternalCalls(authReadConsistencyLevel(), Lists.newArrayList(ByteBufferUtil.bytes(role.getRoleName()), ByteBufferUtil.bytes(resource.getName())));
    ResultMessage.Rows rows = select(authorizeRoleStatement, options);
    UntypedResultSet result = UntypedResultSet.create(rows.result);
    if (result.isEmpty())
        return;
    // Fetch the single row once instead of calling one() for the has() check
    // and again for the read, as the original code did.
    UntypedResultSet.Row row = result.one();
    if (!row.has(PERMISSIONS))
        return;
    for (String perm : row.getSet(PERMISSIONS, UTF8Type.instance))
        permissions.add(Permission.valueOf(perm));
}
56221.14417cassandra
public Set<Permission> authorize(AuthenticatedUser user, IResource resource) {
    try {
        // Superusers implicitly hold every permission applicable to the resource.
        if (user.isSuper())
            return resource.applicablePermissions();
        // Otherwise union the permissions granted to each of the user's roles.
        Set<Permission> granted = EnumSet.noneOf(Permission.class);
        for (Role role : user.getRoleDetails()) {
            addPermissionsForRole(granted, resource, role.resource);
        }
        return granted;
    } catch (RequestExecutionException | RequestValidationException e) {
        logger.debug("Failed to authorize {} for {}", user, resource);
        throw new UnauthorizedException("Unable to perform authorization of permissions: " + e.getMessage(), e);
    }
}
57120.0218cassandra
/**
 * Loads the audit trigger's configuration from the classpath resource named by
 * {@code AUDIT_PROPERTIES_FILE_NAME}.
 *
 * @throws RuntimeException wrapping any load failure (including a missing
 *         resource, which surfaces as an NPE from {@link Properties#load})
 */
private static Properties loadProperties() {
    Properties properties = new Properties();
    // try-with-resources closes the stream on every path, replacing the manual
    // closeQuietly-in-finally; a null stream (missing resource) is skipped by
    // close and the NPE from load() is wrapped below, as before.
    try (InputStream stream = AuditTrigger.class.getClassLoader().getResourceAsStream(AUDIT_PROPERTIES_FILE_NAME)) {
        properties.load(stream);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return properties;
}
58101.58317cassandra
public boolean commit() throws LoginException {
    // Nothing to commit unless the authentication phase succeeded.
    if (!succeeded) {
        return false;
    }
    // Attach the authenticated principal to the subject (idempotently),
    // then clear transient credentials and record the successful commit.
    principal = new CassandraPrincipal(username);
    if (!subject.getPrincipals().contains(principal))
        subject.getPrincipals().add(principal);
    cleanUpInternalState();
    commitSucceeded = true;
    return true;
}
59174.17313cassandra
public static void assertHashWithoutKeyspace(PreparedStatement statement, String queryString, String ks) {
    // The cluster's prepared-statement id must match the keyspace-less hash;
    // distinguish "hashed with keyspace" from a completely unknown hash.
    MD5Digest actual = id(statement);
    MD5Digest expected = hashWithoutKeyspace(queryString, ks);
    if (actual.equals(expected))
        return;
    if (actual.equals(hashWithKeyspace(queryString, ks)))
        throw new AssertionError(String.format("Got hash with keyspace from the cluster: %s, should have gotten %s", actual, expected));
    throw new AssertionError(String.format("Got unrecognized hash: %s", actual));
}
60216.22111cassandra
public void querySuccess(CQLStatement statement, String query, QueryOptions options, QueryState state, long queryTime, Message.Response response) {
    // Record a successful (non-batch) statement execution.
    AuditLogEntry.Builder builder = new AuditLogEntry.Builder(state);
    builder.setType(statement.getAuditLogContext().auditLogEntryType);
    builder.setOperation(query);
    builder.setTimestamp(queryTime);
    builder.setScope(statement);
    builder.setKeyspace(state, statement);
    builder.setOptions(options);
    log(builder.build());
}
61272.4110cassandra
public Collection<Mutation> augment(Partition update) {
    // Build a single audit-row mutation recording which partition was updated.
    TableMetadata auditMetadata = Schema.instance.getTableMetadata(auditKeyspace, auditTable);
    PartitionUpdate.SimpleBuilder audit = PartitionUpdate.simpleBuilder(auditMetadata, TimeUUID.Generator.nextTimeUUID());
    TableMetadata updated = update.metadata();
    audit.row()
         .add("keyspace_name", updated.keyspace)
         .add("table_name", updated.name)
         .add("primary_key", updated.partitionKeyType.getString(update.partitionKey().getKey()));
    return Collections.singletonList(audit.buildAsMutation());
}
62203.959cassandra
public Map<String, Serializable> toMap() {
    // Serialize the non-null entry attributes under fixed keys.
    Map<String, Serializable> result = new HashMap<>();
    String keyspace = entry.getKeyspace();
    if (keyspace != null)
        result.put("keyspace", keyspace);
    String operation = entry.getOperation();
    if (operation != null)
        result.put("operation", operation);
    String scope = entry.getScope();
    if (scope != null)
        result.put("scope", scope);
    String user = entry.getUser();
    if (user != null)
        result.put("user", user);
    return result;
}
63151.2749cassandra
private IAuditLogger getAuditLogger(AuditLogOptions options) throws ConfigurationException {
    // Fall back to the binary logger unless a custom logger class is configured.
    ParameterizedClass custom = options.logger;
    if (custom == null || custom.class_name == null)
        return new BinAuditLogger(options);
    return FBUtilities.newAuditLogger(custom.class_name, custom.parameters == null ? Collections.emptyMap() : custom.parameters);
}
64177.8728cassandra
public void batchSuccess(BatchStatement.Type batchType, List<? extends CQLStatement> statements, List<String> queries, List<List<ByteBuffer>> values, QueryOptions options, QueryState state, long queryTime, Message.Response response) {
    // One entry for the batch itself plus one per contained statement.
    for (AuditLogEntry auditLogEntry : buildEntriesForBatch(statements, queries, state, options, queryTime))
        log(auditLogEntry);
}
6576.1129cassandra
/**
 * Asserts that two preparations of the same statement produced the same
 * prepared-statement id.
 *
 * @throws AssertionError if the ids differ
 */
public static void assertStable(PreparedStatement first, PreparedStatement subsequent) {
    MD5Digest firstId = id(first);
    MD5Digest subsequentId = id(subsequent);
    if (!firstId.equals(subsequentId)) {
        // Fix: the format arguments were swapped — the first placeholder is
        // labeled "Subsequent id", so it must receive the subsequent id.
        throw new AssertionError(String.format("Subsequent id (%s) is different from the first one (%s)", subsequentId, firstId));
    }
}
66120.9317cassandra
public void prepareFailure(@Nullable CQLStatement stmt, @Nullable String query, QueryState state, Exception cause) {
    // Record a failed PREPARE; only the raw query text is available.
    AuditLogEntry.Builder builder = new AuditLogEntry.Builder(state);
    builder.setOperation(query);
    builder.setType(AuditLogEntryType.PREPARE_STATEMENT);
    log(builder.build(), cause);
}
6797.6717cassandra
public void authFailure(QueryState state, Exception cause) {
    // Record a failed login attempt.
    AuditLogEntry.Builder builder = new AuditLogEntry.Builder(state);
    builder.setOperation("LOGIN FAILURE");
    builder.setType(AuditLogEntryType.LOGIN_ERROR);
    log(builder.build(), cause);
}
6889.8636cassandra
 static boolean isFiltered(String input, Set<String> includeSet, Set<String> excludeSet) {
    // An entry present in the exclude set is always filtered out.
    boolean excluded = !excludeSet.isEmpty() && excludeSet.contains(input);
    if (excluded)
        return true;
    // With no include list everything passes; otherwise only listed entries pass.
    boolean included = includeSet.isEmpty() || includeSet.contains(input);
    return !included;
}
6955.3527cassandra
// Computes the MD5 digest of the input. Synchronized because cachedDigest is a
// shared digest instance reused across calls (reset() shows the reuse), so
// reset+digest must not interleave between threads.
public static synchronized MD5Digest compute(byte[] toHash) {
    cachedDigest.reset();
    return MD5Digest.wrap(cachedDigest.digest(toHash));
}
7050.7216cassandra
public void onDropFunction(UDFunction function) {
    // Revoke all permissions granted on the function being dropped.
    FunctionResource dropped = FunctionResource.function(function.name().keyspace, function.name().name, function.argTypes());
    DatabaseDescriptor.getAuthorizer().revokeAllOn(dropped);
}
7125.2727cassandra
private void log(AuditLogEntry logEntry, Exception e, List<String> queries) {
    // Re-tag the entry according to the failure cause, append the (password
    // obfuscated) query text, and emit it.
    AuditLogEntryType failureType;
    if (e instanceof UnauthorizedException)
        failureType = AuditLogEntryType.UNAUTHORIZED_ATTEMPT;
    else if (e instanceof AuthenticationException)
        failureType = AuditLogEntryType.LOGIN_ERROR;
    else
        failureType = AuditLogEntryType.REQUEST_FAILURE;
    AuditLogEntry.Builder builder = new AuditLogEntry.Builder(logEntry);
    builder.setType(failureType);
    builder.appendToOperation(obfuscatePasswordInformation(e, queries));
    log(builder.build());
}
7233.6916cassandra
// Registers the permission caches with the central cache service and
// initializes the CIDR authorizer's caches.
public static void init() {
    AuthCacheService.instance.register(permissionsCache);
    AuthCacheService.instance.register(networkPermissionsCache);
    cidrAuthorizer.initCaches();
}
7339.025cassandra
 static void create(AuditLogEntry entry) {
    // Publish a diagnostic event only when enabled for this entry type.
    if (!isEnabled(entry.getType()))
        return;
    DiagnosticEventService.instance().publish(new AuditEvent(entry));
}
7424.015cassandra
// Allow-all implementation: accepts every internode connection unconditionally,
// ignoring address, port, certificates and direction.
public boolean authenticate(InetAddress remoteAddress, int remotePort, Certificate[] certificates, InternodeConnectionDirection connectionType) {
    return true;
}
7539.014cassandra
public static boolean equalsToHashWithKeyspace(byte[] digest, String queryString, String ks) {
    // Compare the raw digest against the keyspace-qualified hash of the query.
    MD5Digest wrapped = MD5Digest.wrap(digest);
    return wrapped.equals(hashWithKeyspace(queryString, ks));
}
7624.024cassandra
public AuditLogOptions getAuditLogOptions() {
    // Prefer the live options while audit logging is enabled; otherwise fall
    // back to the statically configured ones.
    if (auditLogger.isEnabled())
        return auditLogOptions;
    return DatabaseDescriptor.getAuditLoggingOptions();
}
7718.0914cassandra
// A prepared statement is identified by the MD5 id of its bound-values metadata.
private static MD5Digest id(PreparedStatement statement) {
    return statement.getPreparedId().boundValuesMetadata.id;
}
7813.9314cassandra
// Renders the entry's source address as a string; the 'true' argument is
// forwarded to toString — presumably "include port"; confirm against the
// address type's API.
public String getSource() {
    return entry.getSource().toString(true);
}
7910.014cassandra
// Forwards the entry to the diagnostic event bus via AuditEvent.create.
public void log(AuditLogEntry logMessage) {
    AuditEvent.create(logMessage);
}
808.014cassandra
// Exposes the metadata map. NOTE(review): returns the internal reference, not
// a defensive copy — callers can mutate it.
public Map<String, Object> getMetadata() {
    return metadata;
}
814.7514cassandra
// The type of the wrapped audit entry, exposed as a raw enum constant.
public Enum<?> getType() {
    return entry.getType();
}
822.015cassandra
// Accessor for the configured audit logger instance.
public IAuditLogger getLogger() {
    return auditLogger;
}
833.014cassandra
// Current checksum value from the underlying digest (e.g. a CRC's getValue()).
public long getChecksum() {
    return this.digest.getValue();
}
842.014cassandra
// Accessor for the wrapped audit log entry.
public AuditLogEntry getEntry() {
    return entry;
}
854.013elasticsearch
// Setter for the purge flag.
public void setPurge(boolean purge) {
    this.purge = purge;
}
863.013elasticsearch
// Reads the fail-if-unavailable flag; the .get() call suggests an atomic or
// settable holder — confirm the field type.
public boolean getFailIfUnavailable() {
    return this.failIfUnavailable.get();
}
872.013cassandra
// Delegates to the wrapped buffer; per the method name this should return a
// fresh copy of the bytes — confirm the delegate's contract.
public byte[] toArrayCopy() {
    return delegate.toArrayCopy();
}
884.7512elasticsearch
// Runs the shared embedded-jar module scenario for Java release version 9.
public void testExplicitModuleEmbeddedJarVersion9() throws Exception {
    testExplicitModuleEmbeddedJarVersionSpecific(9);
}
8928.0711cassandra
// Wraps the asynchronous execution of 'call' in a supplier that blocks (via
// waitOn) until the result is available; execution starts when the returned
// supplier is invoked, not here.
public Supplier<O> supplySync(SerializableSupplier<O> call) {
    return () -> waitOn(supplyAsync(call).get());
}
9023.2611cassandra
 // Builds a ReplicaMap view of the list keyed by each replica's endpoint.
 static ReplicaMap<InetAddressAndPort> endpointMap(ReplicaList list) {
    return new ReplicaMap<>(list, Replica::endpoint);
}
9115.5111cassandra
// Handler that maps any IOException to null (i.e. swallows the error and
// yields an absent value).
private static ThrowingFunction<IOException, V, RuntimeException> nulls() {
    return ignore -> null;
}
9212.6811cassandra
// Static factory: wraps the varargs array in an ActionList.
public static ActionList of(Action... actions) {
    return new ActionList(actions);
}
938.011cassandra
// A Void payload occupies no bytes on the wire, regardless of version.
public long serializedSize(Void v, int version) {
    return 0;
}
945.011hadoop
// Wraps the stored string path in a (Hadoop) Path object; a new instance is
// created on every call.
public Path getPath() {
    return new Path(path);
}
954.011cassandra
// Identity transform: this implementation leaves partition keys untouched.
protected DecoratedKey applyToPartitionKey(DecoratedKey key) {
    return key;
}
963.011hadoop
// This history event always represents a job submission.
public EventType getEventType() {
    return EventType.JOB_SUBMITTED;
}
972.011cassandra
// Records one more paused connection; only the counter is touched here —
// resuming/decrementing presumably happens elsewhere.
public void pauseConnection() {
    pausedConnections.incrementAndGet();
}